date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | dirk-weimar/confluence-chatbot | module~shared.py | import openai
import tiktoken
# ------------- Shared variables ------------- #
tokenizer_encoding_name = 'cl100k_base'
embedding_model = 'text-embedding-ada-002'
# ------------------ Config ------------------ #
max_tokens_response = 400 # adjust for longer or shorter answers from the chatbot
tokens_system_message = 150 # adjust if you change system message
tokens_context_message = 15 # adjust if you change context message
tokens_meta_infos = 135 # for example 'role: system' in chat messages
max_tokens_completion_model = 4096 # adjust if you change completion model
max_num_tokens = max_tokens_completion_model \
- max_tokens_response \
- tokens_system_message \
- tokens_context_message \
- tokens_meta_infos
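# With the defaults above this leaves 4096 - 400 - 150 - 15 - 135 = 3396 tokens
# of the completion model's context window for the user question plus retrieved context.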
# ------------- Shared functions ------------- #
def get_file_name_for_space(file_name: str, space: str) -> str:
return file_name + '_' + space + '.csv'
def create_embeddings(text: str, model: str, indicate_progress = False) -> list:
result = openai.Embedding.create(model = model, input = text)
if(indicate_progress):
print(".", end = "", flush = True)
return result["data"][0]["embedding"]
| [] |
2024-01-10 | OscarGO14/huggingChat | chatHugg.py |
import os
import constants
import sys
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain import HuggingFaceHub
from langchain.vectorstores import Chroma
# Load the API key into the environment using os
os.environ["HUGGINGFACEHUB_API_TOKEN"] = constants.HUGGINGFACEHUB_API_TOKEN
PERSIST = False
query = None
if len(sys.argv) > 1:
query = sys.argv[1]
if PERSIST and os.path.exists("persist"):
print("Reusing index...\n")
vectorstore = Chroma(persist_directory="persist", embedding_function=HuggingFaceEmbeddings())
index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
loader = TextLoader("data/data.txt")
if PERSIST:
index = VectorstoreIndexCreator(vectorstore_kwargs={"persist_directory":"persist"}).from_loaders([loader])
else:
index = VectorstoreIndexCreator(embedding=HuggingFaceEmbeddings()).from_loaders([loader])
llm = HuggingFaceHub(repo_id="declare-lab/flan-alpaca-large", model_kwargs={"temperature": 0.1,"max_length": 64})
chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)
chat_history = []
while True:
if not query:
query = input("Prompt: ")
if query in ['quit', 'q', 'exit']:
sys.exit()
result = chain({"question": query, "chat_history": chat_history})
print(result['answer'], len(result["answer"]))
chat_history.append((query, result['answer']))
query = None
| [] |
2024-01-10 | akvelon/study-assistant | se_indexing~indexer.py | """DB INDEXER"""
import os
import pickle
import json
import glob
import multiprocessing
import openai
from db_engine.config import get_database
from settings import settings
def get_documents():
"""Find all documents from the documents/ folder and return absolute path."""
documents_folder = os.path.join(os.getcwd(), "se_indexing/documents/")
# Find all files in documents/ folder
document_files = glob.glob(os.path.join(documents_folder, "*.json"), recursive=True)
for document in document_files:
yield document
# return document_files
# TODO: Implement deleting a document when finished indexing
def create_summary_for_content(content):
"""Creates summary using content for document"""
max_tokens = 4097
summary_word_count = 100 # words
tokens_per_word = 100 / 75 # ~1.33 tokens per word
summary_token_count = int(summary_word_count * tokens_per_word) # tokens
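# Note: this reserves int(100 * 100 / 75) = 133 tokens for the summary; the slice applied to
# `content` below (content[: max_tokens - summary_token_count]) is a character slice used as a
# rough proxy for keeping the request within the model's token limit.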
messages = [
# Prepare ChatGPT for summarizing
{
"role": "system",
# pylint: disable=line-too-long
"content": """Summarize the following content as concisely as possible. Max word count is 400.""",
},
{
"role": "user",
"content": content[: max_tokens - summary_token_count],
},
]
gpt_response = openai.ChatCompletion.create(
model=settings.chatgpt_model,
messages=messages,
)
summary = gpt_response.choices[0].message.content
return summary
def create_embedding_for_summary(summary):
"""Creates embedding for summary"""
embedding_reply = openai.Embedding.create(
model=settings.embedding_model,
input=summary,
)
embedding_list = embedding_reply["data"][0]["embedding"]
# Serialize embedding_list into a bytestream
embedding = pickle.dumps(embedding_list, pickle.HIGHEST_PROTOCOL)
return embedding
def process_document(document_abspath):
"""Document indexing func"""
database = get_database()
with open(document_abspath, "r", encoding="utf-8") as document_file:
print(f"Processing {document_file.name}")
document = json.load(document_file)
document_content = document["content"]
# Check if document is already in database
if database.find_document_by_url(document["url"]) is None:
document_summary = create_summary_for_content(document_content)
document_embedding = create_embedding_for_summary(document_summary)
document_id = database.insert_document(document)
summary_id = database.insert_summary(document_id, document_summary)
database.insert_embedding(
summary_id, settings.embedding_model, document_embedding
)
# TODO: Delete document from documents/ folder
# delete_document(document_filename)
def main():
"""Main func"""
with get_database() as database:
database.create_database_if_not_exists()
openai.api_key = settings.openai_key
with multiprocessing.Pool(processes=8) as pool:
for document_abspath in get_documents():
document_filename = os.path.basename(document_abspath)
print(f"Indexing {document_filename}")
# this is so awesome
pool.apply_async(process_document, args=(document_abspath,))
pool.close()
pool.join()
if __name__ == "__main__":
    main()
| [
"Summarize the following content as concisely as possible. Max word count is 400."
] |
2024-01-10 | akvelon/study-assistant | api~assistants~study_assistant~study_assistant.py | """Study assistant"""
import time
import io
from pydantic import BaseSettings
import openai
from settings import settings
from api.endpoints.search import search_engine
from api.endpoints.user import User
from api.endpoints.schemas import (
MessageAttachment,
Message,
MessagesRequest,
MessagesResponse,
)
from api.assistants.history.manager import HistoryManager
from api.db.schools import schools_db
history_manager = HistoryManager()
openai.api_key = settings.openai_key
def parse_prompt(file: str) -> str:
"""Loads prompts for Chat"""
with open(file, "r", encoding="utf-8") as promptfile:
prompt = promptfile.read()
return prompt
class StudyAssistantSettings(BaseSettings):
"""read file and return as string"""
prompt = parse_prompt("api/assistants/study_assistant/study_assistant.txt")
model: str = "gpt-3.5-turbo"
greeting: str = ""
temperature: float = 0.8
max_tokens: int = 400
prompt: str = prompt
name: str = "Study Assistant"
description: str = ""
category: str = "language"
class StudyAssistant(StudyAssistantSettings):
"""Study assistant class"""
async def generate_response(
self,
request: MessagesRequest,
user: User,
school_id: int,
) -> MessagesResponse:
"""Generates response for answer"""
# Places the system prompt at beginning of list
messages = [{"role": "system", "content": self.prompt}]
# Appends the rest of the conversation
for message in request.messages:
messages.append({"role": message.role, "content": message.content})
documents = await search_engine.search_text_vectors(request, school_id)
document = documents[0] if len(documents) else None
if user:
school_name = schools_db.get_school_by_id(user.schoolId)
messages.append(
{
"role": "system",
"content": f"""
For the response, take into account the following information:"
the user studies at the {school_name}
""",
}
)
if document:
messages.append(search_engine.get_system_message(document))
# Generate response
gpt_response = openai.ChatCompletion.create(
model=self.model,
messages=messages,
max_tokens=self.max_tokens,
temperature=self.temperature,
)
attachments = []
# Attach document to user message content
if document:
include_doc = search_engine.should_search_docs(
gpt_response["choices"][0]["message"]["content"],
0.5,
school_id,
[document.document],
)
if include_doc:
image_src = (
document.document.image_metadata[0]["src"]
if document.document.image_metadata
else None
)
attachments.append(
MessageAttachment(
id=document.document.id,
title=document.document.title,
summary=document.document.summary,
url=document.document.url,
image=image_src,
)
)
# Convert to Message schema
response_message = Message(
id=gpt_response["id"],
role=gpt_response["choices"][0]["message"]["role"],
timestamp=time.time(),
content=gpt_response["choices"][0]["message"]["content"],
attachments=attachments,
)
return history_manager.process_messages(request, response_message, user)
async def generate_response_audio(
self, audio_file, chat_id: int | None, user: User | None, school_id: int
) -> MessagesResponse:
"""Generate response for audio question."""
# Convert audio file to byte stream, due to unknown reasons
# OpenAI does not accept SpooledTemporaryFile's
buffer = io.BytesIO(audio_file.file.read())
buffer.name = audio_file.filename
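# Setting .name matters: the transcription API uses the filename (extension) to
# determine the audio format of the uploaded byte stream.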
# Transcribe audio byte stream to text
transcript = openai.Audio.transcribe("whisper-1", buffer, response_format="text")
# Start new conversation if no chat_id is present
if chat_id is None:
request = MessagesRequest(
messages=[
Message(
id="1",
role="user",
timestamp=time.time(),
content=transcript,
)
]
)
return await self.generate_response(request, user, school_id)
# Otherwise append transcription to existing chat history
# Attempt to get chat history
chat_history = history_manager.get_history(chat_id, user.id)
request = MessagesRequest(
chat=chat_history.chat,
messages=chat_history.messages,
)
request.messages.append(
Message(
id=str(len(request.messages) + 1),
role="user",
timestamp=time.time(),
content=transcript,
)
)
return await self.generate_response(request, user, school_id)
class UsersMessageMissingException(Exception):
"""Missing exception class"""
| [
"content",
"\n For the response, take into account the following information:\"\n the user studies at the PLACEHOLDER\n ",
"api/assistants/study_assistant/study_assistant.txt"
] |
2024-01-10 | akvelon/study-assistant | api~assistants~quick_replies~quick_replies.py | """Quick Replies assistant functions"""
import openai
def parse_prompt(file: str) -> str:
"""Loads prompts for quick replies"""
with open(file, "r", encoding="utf-8") as promptfile:
prompt = promptfile.read()
return prompt
async def generate_quick_replies(message: str) -> list[str]:
"""Generates quick replies"""
prompt = parse_prompt("api/assistants/quick_replies/quick_replies.txt")
# Input gpt response
messages = [
{"role": "system", "content": prompt},
{"role": "assistant", "content": message},
]
# Generate open ai response
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
# Extract response message as string
response_message = gpt_response["choices"][0].message["content"]
# Split string into list of sub str and return
quick_replies = response_message.split("//")
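# Illustrative: a response like "Tell me more//Show an example//Thanks" becomes
# ["Tell me more", "Show an example", "Thanks"] (assuming the prompt file asks for '//'-separated replies).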
return quick_replies
| [
"api/assistants/quick_replies/quick_replies.txt"
] |
2024-01-10 | rickli92/EQcorrscan | eqcorrscan~tests~brightness_test.py | """
A series of test functions for the core.bright_lights module in EQcorrscan.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
class BrightnessTestMethods(unittest.TestCase):
def test_read_tt(self):
from eqcorrscan.core.bright_lights import _read_tt
import os
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
# Test reading S from S
stations, nodes, lags = _read_tt(path=testing_path, stations=['COSA'],
phase='S', phaseout='S')
self.assertEqual(stations[0], 'COSA')
self.assertEqual(len(nodes), len(lags[0]))
# Test reading P from S
stations, nodes, lags = _read_tt(path=testing_path, stations=['COSA'],
phase='S', phaseout='P')
self.assertEqual(stations[0], 'COSA')
self.assertEqual(len(nodes), len(lags[0]))
# Test reading S from P
stations, nodes, lags = _read_tt(path=testing_path, stations=['COSA'],
phase='P', phaseout='S')
self.assertEqual(stations[0], 'COSA')
self.assertEqual(len(nodes), len(lags[0]))
# Test reading P from P
stations, nodes, lags = _read_tt(path=testing_path, stations=['COSA'],
phase='P', phaseout='P')
self.assertEqual(stations[0], 'COSA')
self.assertEqual(len(nodes), len(lags[0]))
def test_resample_grid(self):
from eqcorrscan.core.bright_lights import _read_tt, _resample_grid
from matplotlib import path
import os
minlon = 168
maxlon = 170
minlat = -46
maxlat = -43
mindepth = 4
maxdepth = 10
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
stations, allnodes, alllags = _read_tt(path=testing_path,
stations=['COSA'],
phase='S', phaseout='S')
corners = [(minlon, minlat),
(maxlon, minlat),
(maxlon, maxlat),
(minlon, maxlat)]
corners = path.Path(corners, closed=True)
stations, nodes, lags = _resample_grid(stations, allnodes, alllags,
mindepth=mindepth,
maxdepth=maxdepth,
corners=corners)
for node in nodes:
self.assertTrue(minlon < node[0] < maxlon)
self.assertTrue(minlat < node[1] < maxlat)
self.assertTrue(mindepth < node[2] < maxdepth)
for node in allnodes:
if node not in nodes:
self.assertFalse((minlon < node[0] < maxlon) and
(minlat < node[1] < maxlat) and
(mindepth < node[2] < maxdepth))
def test_rm_similarlags(self):
from eqcorrscan.core.bright_lights import _read_tt, _rm_similarlags
import os
import numpy as np
threshold = 2
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
stations, allnodes, alllags = _read_tt(path=testing_path,
stations=['COSA'],
phase='S', phaseout='S')
stations, nodes, lags = _rm_similarlags(stations=stations,
nodes=allnodes,
lags=alllags,
threshold=threshold)
for lag in lags:
for _lag in lag:
other_lags = np.array([l for l in lag if not l == _lag])
self.assertTrue(np.all(np.abs(other_lags - _lag) > threshold))
def test_rms(self):
from eqcorrscan.core.bright_lights import _rms
import numpy as np
rms = _rms(np.zeros(1000) + 1)
self.assertEqual(rms, 1)
rms = _rms(np.random.randn(10000))
self.assertEqual(round(rms), 1)
def test_node_loop(self):
from eqcorrscan.core.bright_lights import _node_loop, _read_tt
import os
from obspy import Stream, Trace
import numpy as np
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
stations, nodes, lags = _read_tt(path=testing_path,
stations=['COSA', 'LABE'],
phase='S', phaseout='S')
st = Stream(Trace())
st[0].stats.station = stations[0]
st[0].data = np.random.randn(86400) * 3000
st[0].data = st[0].data.astype(np.int16)
st += Trace(np.random.randn(86400) * 3000)
st[1].stats.station = 'LABE'
index, energy = _node_loop(stations=stations, lags=lags[:, 1],
stream=st, clip_level=4)
self.assertEqual(index, 0)
self.assertEqual(np.shape(energy), (1, 86400))
def test_cum_net_resp(self):
from eqcorrscan.core.bright_lights import _cum_net_resp
from eqcorrscan.core.bright_lights import _node_loop, _read_tt
import os
from obspy import Stream, Trace
import numpy as np
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
stations, nodes, lags = _read_tt(path=testing_path,
stations=['COSA', 'LABE'],
phase='S', phaseout='S')
st = Stream(Trace())
st[0].stats.station = stations[0]
st[0].data = np.random.randn(86400) * 3000
st[0].data = st[0].data.astype(np.int16)
st += Trace(np.random.randn(86400) * 3000)
st[1].stats.station = stations[1]
index, energy_file = _node_loop(stations=stations, lags=lags[:, 1],
stream=st, clip_level=4,
mem_issue=True)
self.assertEqual(index, 0)
self.assertTrue(type(energy_file) == str)
cum_net_resp, indeces = _cum_net_resp(node_lis=[0])
self.assertEqual(len(cum_net_resp), 86400)
self.assertEqual(len(indeces), 86400)
def test_find_detections(self):
from eqcorrscan.core.bright_lights import _find_detections
from eqcorrscan.core.bright_lights import _cum_net_resp
from eqcorrscan.core.bright_lights import _node_loop, _read_tt
import os
from obspy import Stream, Trace
import numpy as np
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
stations, nodes, lags = _read_tt(path=testing_path,
stations=['COSA', 'LABE'],
phase='S', phaseout='S')
st = Stream(Trace())
st[0].stats.station = stations[0]
st[0].data = np.random.randn(86400) * 3000
st[0].data = st[0].data.astype(np.int16)
st += Trace(np.random.randn(86400) * 3000)
st[1].stats.station = stations[1]
index, energy_file = _node_loop(stations=stations, lags=lags[:, 1],
stream=st, clip_level=4,
mem_issue=True)
cum_net_resp, indeces = _cum_net_resp(node_lis=[0])
all_nodes = [nodes[1] for i in range(len(cum_net_resp))]
detections = _find_detections(cum_net_resp=cum_net_resp,
nodes=all_nodes, threshold=10,
thresh_type='MAD', samp_rate=1,
realstations=[tr.stats.station
for tr in st],
length=10)
self.assertEqual(len(detections), 0)
detections = _find_detections(cum_net_resp=cum_net_resp,
nodes=all_nodes, threshold=5,
thresh_type='MAD', samp_rate=1,
realstations=[tr.stats.station
for tr in st],
length=10)
self.assertTrue(len(detections) > 0)
def test_coherence(self):
from eqcorrscan.core.bright_lights import coherence
from obspy import read
import os
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data', 'WAV', 'TEST_',
'2013-09-01-0410-35.DFDPC_024_00')
st = read(testing_path)
coh = coherence(stream_in=st)
self.assertIsInstance(coh, float)
coh = coherence(stream_in=st, clip=(0.5, 10))
self.assertIsInstance(coh, float)
coh = coherence(stream_in=st, stations=[tr.stats.station
for tr in st[0:-5]])
self.assertIsInstance(coh, float)
def test_brightness(self):
from eqcorrscan.core.bright_lights import brightness, _read_tt
import os
from obspy import Stream, Trace
import numpy as np
testing_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'test_data') + os.sep
# Test reading S from S
stations, nodes, lags = _read_tt(path=testing_path,
stations=['COSA', 'LABE'],
phase='S', phaseout='S')
st = Stream(Trace())
st[0].stats.station = stations[0]
st[0].data = np.random.randn(86400) * 3000
st[0].data = st[0].data.astype(np.int16)
st += Trace(np.random.randn(86400) * 3000)
st[1].stats.station = stations[1]
st[1].stats.channel = 'HHZ'
st[0].stats.channel = 'HHZ'
detections, nodes_out = brightness(stations=stations, nodes=nodes,
lags=lags, stream=st,
threshold=1.885,
thresh_type='MAD',
template_length=1,
template_saveloc='.',
coherence_thresh=(10, 1))
self.assertEqual(len(detections), 0)
self.assertEqual(len(detections), len(nodes_out))
if __name__ == '__main__':
"""
Run core tests
"""
unittest.main()
| [] |
2024-01-10 | Downtownitem/STAR-API | AI~artificial_intelligence.py | import os
import openai
from typing import List, Dict
from AI.functions import get_function_list, execute_function
import json
class OpenAI:
def __init__(self):
openai.api_key = os.getenv('OPENAI_API_KEY')
self.functions = get_function_list()
self.messages = [
{
'role': 'system',
'content': 'You are Star the artificial intelligence assistant of the Nasa. Always use the function "get_information" to get the information. Act gentlemanly.'
}
]
def set_messages(self, messages: List[Dict[str, str]]):
self.messages = messages
def add_message(self, message: Dict[str, str]):
self.messages.append(message)
def generate_completion(self):
function_call_end = True
while function_call_end:
function_call_end = False
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=self.messages,
functions=self.functions,
function_call="auto",
max_tokens=500,
stream=True
)
actual_content = ""
actual_function_name = ""
actual_function_arguments = ""
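# With stream=True the response is an iterator of chunks; each chunk's choices[0]["delta"]
# carries either partial text content or function-call fragments (a name and argument pieces),
# which the loop below accumulates until a finish_reason is reported.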
for message in response:
# Function call
if "function_call" in message["choices"][0]["delta"]:
if "name" in message["choices"][0]["delta"]["function_call"]:
actual_function_name = message["choices"][0]["delta"]["function_call"]["name"]
actual_function_arguments = ""
if "arguments" in message["choices"][0]["delta"]["function_call"]:
actual_function_arguments += message["choices"][0]["delta"]["function_call"]["arguments"]
# Content of the message
if "content" in message["choices"][0]["delta"]:
if message["choices"][0]["delta"]["content"] is not None:
actual_content += message["choices"][0]["delta"]["content"]
yield str({"type": "message", "content": message["choices"][0]["delta"]["content"]})
# Conversation end condition
if message["choices"][0]["finish_reason"] is not None:
if message["choices"][0]["finish_reason"] == "function_call":
function_call_end = True
break
if function_call_end:
actual_function_arguments = json.loads(actual_function_arguments)
execution = execute_function(actual_function_name, actual_function_arguments)
response = None
for i in execution:
if i['type'] == 'function':
yield str({'type': 'function', 'content': i['content']})
else:
response = {'type': 'message', 'content': i['content']}
self.add_message({"role": "function", "name": actual_function_name, "content": str(response)})
else:
self.add_message({"role": "assistant", "content": actual_content})
def __str__(self):
return str(self.messages)
def __repr__(self):
return str(self.messages)
| [
"content",
"You are Star the artificial intelligence assistant of the Nasa. Always use the function \"get_information\" to get the information. Act gentlemanly."
] |
2024-01-10 | Downtownitem/STAR-API | AI~function_execution.py | import os
import json
from typing import List, Dict
import openai
import threading
from Database.Vectorial.vector_database import FullControl
openai.api_key = os.getenv('OPENAI_API_KEY')
control = FullControl()
def get_one_response(text: List[Dict[str, str]]):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=text,
max_tokens=500
)
return response['choices'][0]['message']['content']
def get_suggestions(text):
try:
text = get_one_response([
{
'role': 'user',
'content': """Answer just with the following json format:
{
"suggested_process": "<Recommended procedure>", (Required)
"process_steps": [<Step 1>, <Step 2>, ...], (Required)
"extra": [<Extra suggestion>, ...] (Not required)
}
Create suggestions that are related and that can help to complete this task/answer this question:\n""" + text
}
])
text = text[text.find('{'):text.rfind('}') + 1]
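# The slice above keeps only the outermost {...} block so any extra prose the model
# wraps around the JSON does not break json.loads.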
json_text = json.loads(text)
if 'suggested_process' not in json_text or 'process_steps' not in json_text:
return {'suggested_process': 'No suggestions found', 'process_steps': [], 'extra': []}
else:
return json_text
except:
return {'suggested_process': 'No suggestions found', 'process_steps': [], 'extra': []}
def get_secondary_questions(text):
try:
text = get_one_response([
{
'role': 'user',
'content': """Answer with the following json format:
{
"related_questions": [<Question>, <Question>, ...]
}
In case it is a text create 3 questions that are related to it, in case it is a question create 3 related questions with secondary doubts that can help to answer the main one:\n""" + text
}
])
text = text[text.find('{'):text.rfind('}') + 1]
json_text = json.loads(text)
if 'related_questions' not in json_text:
return {'related_questions': []}
else:
return json_text
except:
return {'related_questions': []}
def get_information_about(question, suggestion):
final_response = {}
def get_suggestion_thread(suggestion):
suggestion = get_suggestions(suggestion)
final_response['suggestions'] = suggestion
if suggestion is not None:
yield {'type': 'function', 'content': 'Creating for suggestions'}
suggestion_thread = threading.Thread(target=get_suggestion_thread, args=(suggestion,))
suggestion_thread.start()
if question is not None:
yield {'type': 'function', 'content': 'Creating related questions'}
secondary = get_secondary_questions(question)
secondary_solutions = []
for i in secondary['related_questions']:
yield {'type': 'function', 'content': 'Searching for ' + str(i)}
temp = {
'question': i,
'search_result': control.query(i, top_k=1),
}
secondary_solutions.append(temp)
final_response['related_questions'] = secondary_solutions
if suggestion is not None:
suggestion_thread.join()
yield {'type': 'function', 'content': 'Searching for ' + question}
final_response['main_question'] = {
'question': question,
'search_result': control.query(question, top_k=1),
}
yield {'type': 'message', 'content': final_response}
| [
"Answer just with the following json format:\n {\n \"suggested_process\": \"<Recommended procedure>\", (Required)\n \"process_steps\": [<Step 1>, <Step 2>, ...], (Required)\n \"extra\": [<Extra suggestion>, ...] (Not required)\n }\n \n Create suggestions that are related and that can help to complete this task/answer this question:\nPLACEHOLDER",
"Answer with the following json format:\n {\n \"related_questions\": [<Question>, <Question>, ...]\n}\n\nIn case it is a text create 3 questions that are related to it, in case it is a question create 3 related questions with secondary doubts that can help to answer the main one:\nPLACEHOLDER",
"Searching for PLACEHOLDER",
"Creating for suggestions",
"Creating related questions"
] |
2024-01-10 | ZhangWei1993/gym-gazebo | gym_gazebo~envs~gazebo_env.py | import gym
import rospy
#import roslaunch
import os
import signal
import subprocess
import time
from os import path
from std_srvs.srv import Empty
import random
class GazeboEnv(gym.Env):
"""Superclass for all Gazebo environments.
"""
metadata = {'render.modes': ['human']}
def __init__(self, launchfile):
random_number = random.randint(10000, 15000)
self.port = "11311"#str(random_number) #os.environ["ROS_PORT_SIM"]
self.port_gazebo = str(random_number+1) #os.environ["ROS_PORT_SIM"]
# os.environ["ROS_MASTER_URI"] = "http://localhost:"+self.port
# os.environ["GAZEBO_MASTER_URI"] = "http://localhost:"+self.port_gazebo
#
# self.ros_master_uri = os.environ["ROS_MASTER_URI"];
with open("log.txt", "a") as myfile:
myfile.write("export ROS_MASTER_URI=http://localhost:"+self.port + "\n")
myfile.write("export GAZEBO_MASTER_URI=http://localhost:"+self.port_gazebo + "\n")
#start roscore
subprocess.Popen(["roscore", "-p", self.port])
time.sleep(1)
print ("Roscore launched!")
# Launch the simulation with the given launchfile name
rospy.init_node('gym', anonymous=True)
if launchfile.startswith("/"):
fullpath = launchfile
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets","launch", launchfile)
if not path.exists(fullpath):
raise IOError("File "+fullpath+" does not exist")
subprocess.Popen(["roslaunch","-p", self.port, fullpath])
print ("Gazebo launched!")
self.gzclient_pid = 0
def set_ros_master_uri(self):
os.environ["ROS_MASTER_URI"] = self.ros_master_uri
def _step(self, action):
# Implement this method in every subclass
# Perform a step in gazebo. E.g. move the robot
raise NotImplementedError
def _reset(self):
# Implemented in subclass
raise NotImplementedError
def _render(self, mode="human", close=False):
if close:
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount > 0:
if self.gzclient_pid != 0:
os.kill(self.gzclient_pid, signal.SIGTERM)
os.wait()
return
tmp = os.popen("ps -Af").read()
proccount = tmp.count('gzclient')
if proccount < 1:
subprocess.Popen("gzclient")
self.gzclient_pid = int(subprocess.check_output(["pidof","-s","gzclient"]))
else:
self.gzclient_pid = 0
def _close(self):
# Kill gzclient, gzserver and roscore
tmp = os.popen("ps -Af").read()
gzclient_count = tmp.count('gzclient')
gzserver_count = tmp.count('gzserver')
roscore_count = tmp.count('roscore')
rosmaster_count = tmp.count('rosmaster')
if gzclient_count > 0:
os.system("killall -9 gzclient")
if gzserver_count > 0:
os.system("killall -9 gzserver")
if rosmaster_count > 0:
os.system("killall -9 rosmaster")
if roscore_count > 0:
os.system("killall -9 roscore")
if gzclient_count or gzserver_count or roscore_count or rosmaster_count:
os.wait()
def _configure(self):
# TODO
# From OpenAI API: Provides runtime configuration to the enviroment
# Maybe set the Real Time Factor?
pass
def _seed(self):
# TODO
# From OpenAI API: Sets the seed for this env's random number generator(s)
pass
| [] |
2024-01-10 | prixingcha/voice-narrator | narrator.py | import os
import openai
import base64
import json
import time
import simpleaudio as sa
import errno
from elevenlabs import generate, play, set_api_key, voices
from dotenv import load_dotenv
import os
load_dotenv()
ELEVENLABS_API_KEY = os.getenv('ELEVENLABS_API_KEY')
ELEVENLABS_VOICE_ID = os.getenv('ELEVENLABS_VOICE_ID')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# print(OPENAI_API_KEY)
set_api_key(ELEVENLABS_API_KEY)
def encode_image(image_path):
while True:
try:
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
except IOError as e:
if e.errno != errno.EACCES:
# Not a "file in use" error, re-raise
raise
# File is being written to, wait a bit and retry
time.sleep(0.1)
def play_audio(text):
# print(os.environ.get("ELEVENLABS_VOICE_ID"))
# audio = generate(text, voice=os.environ.get("ELEVENLABS_VOICE_ID"))
audio = generate(text, voice= ELEVENLABS_VOICE_ID)
# unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=")
# dir_path = os.path.join("narration", unique_id)
# os.makedirs(dir_path, exist_ok=True)
# file_path = os.path.join(dir_path, "audio.wav")
# with open(file_path, "wb") as f:
# f.write(audio)
# play(audio)
def generate_new_line(base64_image):
return [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{base64_image}",
},
],
},
]
def analyze_image(base64_image, script):
response = openai.chat.completions.create(
model="gpt-4-vision-preview",
# You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary.
messages=[
{
"role": "system",
"content": """
You are Matt Damon the famous actor and famous documentary narrator, generate a narration of documentary aboutMt. Everest of Nepal.
Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it!
""",
},
]
+ script
+ generate_new_line(base64_image),
max_tokens=500,
)
response_text = response.choices[0].message.content
return response_text
def main():
script = []
while True:
# strValue ="""
# You are Matt Damon the famous actor and famous documentary narrator, generate a narration of documentary about mount everst Mt. Everest of Nepal.
# Make it snarky and funny. Don't repeat yourself. also make it compelling narrative and authentic dialogue in the.
# """
strValue ="""
Now, let’s talk about the history of this mountain. It was first conquered by Sir Edmund Hillary and Tenzing Norgay in 1953. And since then, it has become a popular destination for thrill-seekers and Instagram influencers alike. But let’s be real, most of us will never climb this mountain. We’ll just watch documentaries about it and pretend we’re there.
Speaking of documentaries, did you know that there are more documentaries about Mount Everest than there are people who have actually climbed it? That’s right, you can watch hours and hours of people freezing their butts off and risking their lives for a chance to stand on top of a mountain. And if you’re lucky, you might even get to see someone take a dump in a bucket. Now that’s what I call entertainment.
But in all seriousness, climbing Mount Everest is no joke. It’s dangerous, it’s expensive, and it’s not for the faint of heart. But if you’re up for the challenge, it can be a life-changing experience. Just don’t forget to bring a warm jacket and a selfie stick.
I hope you enjoyed this snarky and funny documentary about Mount Everest. And remember, if you ever decide to climb this mountain, don’t forget to take a selfie at the top. It’s the ultimate flex.
"""
# exit()
# play_audio('')
# exit()
# path to your image
image_path = os.path.join(os.getcwd(), "./frames/frame.jpg")
# getting the base64 encoding
base64_image = encode_image(image_path)
# analyze posture
print("👀 Matt is watching...")
analysis = analyze_image(base64_image, script=script)
print("🎙️ Matt says:")
print(analysis)
play_audio(analysis)
script = script + [{"role": "assistant", "content": analysis}]
# wait for 5 seconds
# time.sleep(5)
if __name__ == "__main__":
main()
| [
"[{'type': 'text', 'text': 'Describe this image'}, {'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}]",
"\n You are Matt Damon the famous actor and famous documentary narrator, generate a narration of documentary aboutMt. Everest of Nepal.\n Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it!\n "
] |
2024-01-10 | bchewy/smulib_fatgpt | backend_api.py | import openai
import PyPDF2
import re
import json
import requests
# import cohere
import json
from unpywall import Unpywall
from unpywall.utils import UnpywallCredentials
from dotenv import load_dotenv
import os
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
scopusKey = os.getenv("SCOPUS_API_KEY")
primoAPI = os.getenv("PRIMO_API_KEY")
UnpywallCredentials("[email protected]")
# Loop through all the retrived DOIs from Scopus/Semantic Scholar to check if there are OpenAccess Articles
def CheckOpenAccess(titleDOI, username):
count=0
for book in titleDOI:
try:
count+=1
print("file ",count,": "+book[1])
response = requests.get(Unpywall.get_pdf_link(doi=book[1]))
filename = book[0] + ".pdf"
with open(username + "/" + filename, "wb") as f:
f.write(response.content)
except:
print("Sorry, no open access articles found")
# def CheckOpenAccess(titleDOI, username):
# count=0
# for book in titleDOI:
# try:
# count+=1
# print("file ",count,": "+book[1])
# response = requests.get(Unpywall.get_pdf_link(doi=book[1]))
# filename = book[0] + ".pdf"
# test = urllib.urlopen(Unpywall.get_pdf_link(doi=book[1]))
# meta = test.info()
# if (meta.getheader("Content-Length")):
# with open(username + "/" + filename, "wb") as f:
# f.write(response.content)
# except:
# print("Sorry, no open access articles found")
def summarisation(file_directory):
def get_page_text(page):
try:
text = page.extract_text()
except:
text = ""
text = str(text)
text = text.strip()
text = re.sub(r"\W+", " ", text)
return text
def summarize_text(text):
messages = [
{
"role": "system",
"content": "Please provide a 1 sentence summary of the following:",
},
{"role": "user", "content": text},
]
response = openai.ChatCompletion.create(
model="gpt-4-0613", messages=messages
)
return response["choices"][0]["message"]["content"]
def summarize_text2_topic(text):
messages = [
{
"role": "system",
"content": "Provide a keywords for the paragraph. Return in JSON format.",
},
{"role": "user", "content": text},
]
response = openai.ChatCompletion.create(
model="gpt-4-0613", messages=messages
)
return response["choices"][0]["message"]["content"]
def summarise_cohere(text):
response = co.summarize(
text=text,
length="auto",
format="auto",
model="summarize-xlarge",
additional_command="",
temperature=0.8,
)
return response.summary
try:
pdf_file = open(file_directory, "rb")
pdf_reader = PyPDF2.PdfReader(pdf_file)
except:
return "",""
pages = len(pdf_reader.pages)
print(f"Total Pages: {pages}")
page_summaries = []
page_summary_cohere = []
for page_num in range(pages):
print(f"Summarizing page {page_num+1}...")
page = pdf_reader.pages[page_num]
text = get_page_text(page)
page_summary = summarize_text(text)
# page_ch_summary = summarise_cohere(text)
page_summaries.append(page_summary)
# page_summary_cohere.append(page_summary_cohere)
print(page_summary)
print()
print(page_summary_cohere)
all_summaries = ". ".join(page_summaries)
final_summary = summarize_text(all_summaries)
topics = summarize_text2_topic(final_summary)
# cohere_summary = summarise_cohere(final_summary)
print()
print("OpenAI's Final Summary:")
print(final_summary)
print("Topics Involved:")
print(topics)
# print("Cohere's Final Summary:")
# print(cohere_summary)
pdf_file.close()
return final_summary, json.loads(topics)
# Function for chatting with the GPT-4 based model.
def context(message, chat_context):
if not chat_context:
chat_context = {
"messages": [
{
"role": "system",
"content": "You are a ChatBot that intakes a user's broad academic or professional interest, refines it into a focused area of study or project topic, and then provides personalized resources and a learning pathway tailored to their unique goals. For instance, if a user mentions they're a Biology student but wishes to delve into data analytics, the model will offer resources on bioinformatics and a suggested learning journey",
},
{"role": "user", "content": str(message)},
]
}
else:
chat_context["messages"].append({"role": "user", "content": str(message)})
# write code to do some basic logging for debugging
print("\n")
print(chat_context)
print("\n")
response = openai.ChatCompletion.create(
model="gpt-4-0613", messages=chat_context["messages"]
)
response_message = response["choices"][0]["message"]["content"]
if not response_message:
response_message = "Our brains are on fire now. Please try again later."
# Append response to context
chat_context["messages"].append(
{"role": "assistant", "content": str(response_message)}
)
return response_message, chat_context
# def recommended_readings(topic: str):
# url = "https://api.semanticscholar.org/graph/v1/paper/search?"
# # params = {'query':topic, 'fields':"title,year,authors,externalIds", "limit": 10}
# params = {'query':topic, 'fields':"externalIds", "limit": 10}
# response = requests.get(url, params)
# recs = []
# res_dict = response.json()
# data_dict = res_dict["data"] # This is array of dicts with all info of results
# # print(data_dict)
# for item in data_dict:
# for key in item :
# #print(key)
# if (key == "externalIds"):
# if (item[key].get("DOI")):
# # print(item[key])
# doi = item[key]["DOI"]
# recs.append(doi)
# return recs
def SemanticScholar(topic : str):
# offset: skip first 10 result, limit: limit the number of records output, fields
# query':context.user_data["query"] --> the actual query from the next message
url ="http://api.semanticscholar.org/graph/v1/paper/search"
params = {'query': topic, 'fields' : "title,externalIds,isOpenAccess"}
recs = []
response = requests.get(url, params)
res_dict = response.json()
data_dict = res_dict["data"] # This is array of dicts with all info of results
# print(res_dict["total"])
#print(data_dict)
# Check if there's any results
if (res_dict["total"]>0):
# for item in data_dict:
# for key in item :
# # print(key)
# founddoi
# if (key == "externalIds"):
# if (item[key].get("DOI")):
# doi = item[key]["DOI"]
# title = item["title"]
# recs.append([title,doi])
# return recs
for item in data_dict:
# print(item)
if ("DOI" in item["externalIds"] and item["isOpenAccess"] == True):
title = item["title"]
doi = item["externalIds"]["DOI"]
recs.append([title, doi])
return recs
else:
text="Sorry, we were unable to find any articles relating to " + topic + "."
return text
def scopus(topic: str):
url = "https://api.elsevier.com/content/search/scopus?"
topic += ",OPENACCESS"
params = {"query": topic, "apikey": scopusKey}
response = requests.get(url, params)
recs = []
res_dict = response.json()
# Returns a list of all results
res = res_dict["search-results"]["entry"]
# print(res)
# print(res_dict["search-results"]["opensearch:totalResults"])
if int(res_dict["search-results"]["opensearch:totalResults"]) > 0:
for book in res:
titleDOI = []
if len(recs) > 9:
break
if book.get("prism:doi") and len(recs) < 11:
titleDOI.append(book["dc:title"])
titleDOI.append(book["prism:doi"])
recs.append(titleDOI)
else:
text = "Sorry, we were unable to find any articles relating to " + topic + "."
return text
return recs
def OpenAlexAbstract(doi: str):
url = "https://api.openalex.org/works/"
url += doi
response = requests.get(url)
res_dict = response.json()
# Returns an inverted index/ dict with key of a word that appears with values index of where it appears
abi = res_dict["abstract_inverted_index"]
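# Illustrative: an inverted index like {"deep": [0], "learning": [1, 3], "for": [2]}
# reconstructs to the sentence "deep learning for learning".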
# Using this to store the max value for each key which in this case is the word
len_index = []
# Add the largest number from each key value into len_index first
for indices in abi.values():
len_index.append(max(indices))
# Find the max value among all the max values in each list
max_index = max(len_index)
# Create a list to store the words in their respective positions
sentence = [""] * (max_index + 1)
# Send each word back into its original position in the sentence
for word, indices in abi.items():
for index in indices:
sentence[index] = word
# Convert the list to a string
reconstructed_sentence = " ".join(sentence)
return reconstructed_sentence
def OpenAlexRelated(topic: str):
# Used for looking for actual concepts reltaed to the search'
url = "https://api.openalex.org/concepts?"
params = {"search": topic}
response = requests.get(url, params)
related = []
res_dict = response.json()
res = res_dict["results"]
for concept in res:
if len(related) < 3:
related.append(concept["display_name"])
return related
def CheckLibrary(titleDOI: list):
# url = "https://api-ap.hosted.exlibrisgroup.com/primo/v1/search?"
found = []
notFound = []
for book in titleDOI:
searchTerm = book[1]
# params = {'vid': "65SMU_INST%3ASMU_NUI", 'tab': "Everything", 'scope': "Everything", 'q': searchTerm, "offset": 0, 'limit':10, 'pcAvailability': 'true', 'INST':"65SMU_INST"}
# params = {'vid': "65SMU_INST%3ASMU_NUI", 'tab': "Everything", 'scope': "Everything", 'q': searchTerm, 'offset': 0, 'limit':10, 'INST':"65SMU_INST", 'apikey': primoAPI}
url = (
"https://api-ap.hosted.exlibrisgroup.com/primo/v1/search?vid=65SMU_INST%3ASMU_NUI&tab=Everything&scope=Everything&q=any,contains,"
+ searchTerm
)
url2 = "&lang=eng&offset=0&limit=10&sort=rank&pcAvailability=true&getMore=0&conVoc=true&inst=65SMU_INST&skipDelivery=true&disableSplitFacets=true&apikey=<apikeyhere>"
response = requests.get(url + url2)
res_dict = response.json()
res = res_dict["info"]
if res["total"] > 0:
found.append([book[0], book[1]])
else:
    notFound.append([book[0], book[1]])
return (found, notFound)
| [
"Please provide a 1 sentence summary of the following:",
"Provide a keywords for the paragraph. Return in JSON format.",
"You are a ChatBot that intakes a user's broad academic or professional interest, refines it into a focused area of study or project topic, and then provides personalized resources and a learning pathway tailored to their unique goals. For instance, if a user mentions they're a Biology student but wishes to delve into data analytics, the model will offer resources on bioinformatics and a suggested learning journey"
] |
2024-01-10 | jaemil/agentsflow | apps~api~agentsflow~config.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
def config():
openai.api_key = os.getenv("OPENAI_API_KEY")
| [] |
2024-01-10 | BigDataIA-Fall2023-Team7/Assignment2-QA-Chatbot-PrivateFiles | backend~fastapiservice~src~chatanswer.py | import ast # for converting embeddings saved as strings back to arrays
import openai # for calling the OpenAI API
import pandas as pd # for storing text and embeddings data
from scipy import spatial # for calculating vector similarities for search
from dotenv import load_dotenv
import os
load_dotenv()
EMBEDDING_MODEL = os.getenv('EMBEDDING_MODEL')
FT_MODEL = os.getenv('FT_MODEL')
openai.api_key = os.getenv("OPENAI_API_KEY")
def strings_ranked_by_relatedness(
query: str,
df: pd.DataFrame,
relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
top_n: int = 2
) -> tuple[list[str], list[float]]:
"""Returns a list of strings and relatednesses, sorted from most related to least."""
try:
query_embedding_response = openai.Embedding.create(
model=EMBEDDING_MODEL,
input=query,
)
except Exception as e:
print(e)
return ""
query_embedding = query_embedding_response["data"][0]["embedding"]
strings_and_relatednesses = [
(row["context"], relatedness_fn(query_embedding, row["embeddings"]))
for i, row in df.iterrows()
]
strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
strings, relatednesses = zip(*strings_and_relatednesses)
return strings[:top_n], relatednesses[:top_n]
def query_message(
query: str,
df: pd.DataFrame,
model: str,
token_budget: int
) -> str:
strings, relatednesses = strings_ranked_by_relatedness(query, df)
question = f"\nQuestion: {query}"
context = ""
for related_context in strings:
context+= f"{related_context}\n"
return context + question
def ask(
question: str,
filepath: str,
model: str = FT_MODEL,
token_budget: int = 4096 - 500,
print_message: bool = True,
) -> str:
df = pd.read_csv(filepath)
df['embeddings'] = df['embeddings'].apply(ast.literal_eval)
message = query_message(question, df, model=model, token_budget=token_budget)
if print_message:
print(message)
messages = [
{"role": "system", "content": "You answer questions using the provided context only"},
{"role": "user", "content": message},
]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0
)
response_message = response["choices"][0]["message"]["content"]
return response_message | [
"You answer questions using the provided context only"
] |
2024-01-10 | Ravoxsg/SummScore | src~candidate_generation~main_candidate_generation.py | # Generate summary candidates with the fine-tuned models.
import os
import time
import argparse
import sys
import torch
import pickle
import datasets
import openai
from tqdm import tqdm
sys.path.append("/data/mathieu/SummScore/src/") # todo: change to your folder path
from common.utils import seed_everything, boolean_string
from common.evaluation import overall_eval
from model_utils import build_tokenizer, build_model
from dataset import Dataset
from engine import beam_search_step
openai.api_key = "xxx" # todo: fill in your OpenAI key here!!
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type = int, default = 42)
parser.add_argument('--cuda', type = boolean_string, default = True)
parser.add_argument('--debug', type = boolean_string, default = False)
parser.add_argument('--debug_size', type = int, default = 10)
parser.add_argument('--few_shot', type = boolean_string, default = True)
# data
parser.add_argument('--dataset_key', type = str, default = "samsum", choices= ["cnndm", "xsum", "wikihow", "samsum"])
# model
parser.add_argument('--model_type', type = str, default = "pegasus", choices=["pegasus", "bart", "chatgpt"])
parser.add_argument('--model_name', type = str, default = "google/pegasus-large,pegasus_unsupervised",
choices = [
# Use case #1: Unsupervised abstractive summarization
"google/pegasus-large,pegasus_unsupervised", "gpt-3.5-turbo,chatgpt",
# Use case #2: Zero-shot transfer
# from CNN/DM
"google/pegasus-cnn_dailymail,pegasus_cnndm", "facebook/bart-large-cnn,bart_cnndm", "Yale-LILY/brio-cnndm-cased,brio_cnndm",
# from XSum
"google/pegasus-xsum,pegasus_xsum", "facebook/bart-large-xsum,bart_xsum", "Yale-LILY/brio-xsum-cased,brio_xsum",
# from WikiHow
"google/pegasus-wikihow,pegasus_wikihow", "our_bart_wikihow,bart_wikihow",
# from SAMSum
"our_pegasus_samsum,pegasus_samsum", "our_bart_samsum,bart_samsum"
])
parser.add_argument('--hidden_size', type = int, default = 768) # 768
parser.add_argument('--cache_dir', type = str, default = "../../../hf_models/pegasus-large/")
parser.add_argument('--load_model_path', type = str, default = "finetuned_checkpoints/our_pegasus_samsum.pt")
# summary generation
parser.add_argument('--val_dataset', type = str, default = "val", choices = ["val", "test"])
parser.add_argument('--max_val_size', type = int, default = 1000)
parser.add_argument('--inference_bs', type = int, default = 6)
parser.add_argument('--save_summaries', type = boolean_string, default = True)
parser.add_argument('--generation_method', type = str, default = "beam_search",
choices = ["beam_search", "diverse_beam_search", "top_p_sampling", "top_k_sampling"])
parser.add_argument('--num_return_sequences', type = int, default = 20) # default: 15
parser.add_argument('--num_beams', type = int, default = 20) # for beam search
parser.add_argument('--num_beam_groups', type = int, default = 20) # for diverse beam search
parser.add_argument('--diversity_penalty', type = float, default = 1.0) # for diverse beam search
parser.add_argument('--top_p', type = float, default = 0.95) # for top-p sampling
parser.add_argument('--top_k', type = int, default = 50) # for top-k sampling
parser.add_argument('--repetition_penalty', type = float, default = 1.0) # for diverse beam search
parser.add_argument('--stemmer', type = boolean_string, default = True)
# metrics
parser.add_argument('--eval_rouge', type = boolean_string, default = True)
parser.add_argument('--eval_bertscore', type = boolean_string, default = False)
parser.add_argument('--eval_new_ngram', type = boolean_string, default = True)
parser.add_argument('--eval_rouge_text', type = boolean_string, default = False)
parser.add_argument('--n_show_summaries', type = int, default=0)
args = parser.parse_args()
dataset_keys = ["cnndm", "xsum", "wikihow", "samsum"]
dataset_names = ["ccdv/cnn_dailymail", "xsum", "wikihow", "samsum"]
dataset_versions = ["3.0.0", "default", "all", "samsum"]
text_keys = ["article", "document", "text", "dialogue"]
summary_keys = ["highlights", "summary", "headline", "summary"]
max_lengths = [1024, 512, 512, 512]
max_summary_lengths = [128, 64, 128, 64]
length_penalties_pegasus = [0.8, 0.8, 0.6, 0.8]
length_penalties_bart = [1.0, 1.0, 1.0, 1.0]
no_repeat_ngram_sizes_pegasus = [0, 3, 0, 0]
no_repeat_ngram_sizes_bart = [3, 3, 3, 3]
ns = [3, 1, 3, 2]
idx = dataset_keys.index(args.dataset_key)
args.dataset_name = dataset_names[idx]
args.dataset_version = dataset_versions[idx]
args.text_key = text_keys[idx]
args.summary_key = summary_keys[idx]
args.max_length = max_lengths[idx]
args.max_summary_length = max_summary_lengths[idx]
if args.model_type == "pegasus":
args.length_penalty = length_penalties_pegasus[idx]
args.no_repeat_ngram_size = no_repeat_ngram_sizes_pegasus[idx]
if "unsupervised" in args.model_name:
args.no_repeat_ngram_size = 3
elif args.model_type == "bart":
args.length_penalty = length_penalties_bart[idx]
args.no_repeat_ngram_size = no_repeat_ngram_sizes_bart[idx]
args.n = ns[idx]
model_name = args.model_name.split(",")
args.model_name = model_name[0]
args.clean_model_name = model_name[1]
print("*"*50)
print(args)
def main(args):
# seed
seed_everything(args.seed)
os.makedirs(f"../../summaries/{args.dataset_key}/{args.val_dataset}/{args.generation_method}/", exist_ok=True)
# device
device = torch.device("cpu")
if args.cuda and torch.cuda.is_available():
device = torch.device("cuda")
args.device = device
print("\nUsing device {}".format(device))
# data
dataset_args = [args.dataset_name, args.dataset_version]
data = datasets.load_dataset(*dataset_args)
val_data = data["validation"]
if args.val_dataset == "test":
val_data = data["test"]
texts = [val_data[i][args.text_key] for i in range(len(val_data))]
labels = [val_data[i][args.summary_key] for i in range(len(val_data))]
# permute
p = pickle.load(open(f"../{args.val_dataset}_permutations/{args.dataset_name}_{args.val_dataset}_permutation.pkl", "rb"))
print(p[:10])
texts = [texts[x] for x in p]
labels = [labels[x] for x in p]
# sample
if args.val_dataset != "test" or args.model_type == "chatgpt":
texts = texts[:args.max_val_size]
labels = labels[:args.max_val_size]
if args.debug:
texts = texts[:args.debug_size]
labels = labels[:args.debug_size]
# run the inference
print(f"Running inference on {len(texts)} data points...")
summaries = []
# you might have to re-run the script 2-3 times to be able to generate summaries on all datapoints
if args.model_type == "chatgpt":
failed_index = []
for i in tqdm(range(len(texts))):
text = texts[i]
prompt = f"Text: {text}.\nSummarize the above text in {args.n} sentence."
try:
responsefromgpt = openai.ChatCompletion.create(
model=f"{args.gpt_model}",
messages=[{"role": "user", "content": prompt}],
max_tokens=args.max_summary_length,
temperature=args.temperature,
top_p=args.top_p,
n=args.num_beams
)
summaries_i = [responsefromgpt['choices'][j]['message']['content'] for j in range(args.num_beams)]
except Exception as exc:
failed_index.append(i)
summaries_i = []
summaries.append(summaries_i)
else:
# tokenizer
tokenizer = build_tokenizer(args)
# datasets
dataset = Dataset(tokenizer, texts, labels, args)
print("Total size of dataset: {}".format(len(texts)))
# data loader
loader = torch.utils.data.DataLoader(dataset, batch_size = args.inference_bs, shuffle = False)
# model
model = build_model(args)
model = model.to(args.device)
# loop
for idx, batch in tqdm(enumerate(loader)):
batch["text_inputs"]["input_ids"] = batch["text_inputs"]["input_ids"].squeeze(1).to(args.device)
batch["text_inputs"]["attention_mask"] = batch["text_inputs"]["attention_mask"].squeeze(1).to(args.device)
summaries_i = beam_search_step(batch, tokenizer, model, args)
summaries += summaries_i
if idx == 0:
print("*"*50)
print(batch["text"][0])
print("*"*20)
print(summaries_i[0])
# evaluation
base_results = [summaries[i][0] for i in range(len(summaries))]
print("*"*100)
print("\nTop beam:")
overall_eval(texts, base_results, labels, args)
# export
num_candidates = len(summaries[0])
if args.save_summaries:
path = f"../../summaries/{args.dataset_key}/{args.val_dataset}/{args.generation_method}/"
with open(path + f"{args.val_dataset}_texts_{len(texts)}_beams_{num_candidates}.pkl", "wb") as f:
pickle.dump(texts, f)
with open(path + f"{args.val_dataset}_summaries_{args.clean_model_name}_{len(texts)}_beams_{num_candidates}.pkl", "wb") as f:
pickle.dump(summaries, f)
with open(path + f"{args.val_dataset}_labels_{len(texts)}_beams_{num_candidates}.pkl", "wb") as f:
pickle.dump(labels, f)
print("saved generated summaries!", path)
if __name__ == '__main__':
main(args)
| [
"f\"Text: {text}.\\nSummarize the above text in {args.n} sentence."
] |
2024-01-10 | Ravoxsg/SummScore | src~summscore~main_build_scores.py | # Generate summary candidates with the fine-tuned models.
import os
import numpy as np
import argparse
import sys
import torch
import pickle
import datasets
import openai
from tqdm import tqdm
sys.path.append("/data/mathieu/SummScore/src/") # todo: change to your folder path
from common.utils import seed_everything, boolean_string
from engine import build_scores
openai.api_key = "xxx" # todo: fill in your OpenAI key here!!
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type = int, default = 42)
parser.add_argument('--cuda', type = boolean_string, default = True)
parser.add_argument('--debug', type = boolean_string, default = False)
parser.add_argument('--debug_size', type = int, default = 10)
parser.add_argument('--few_shot', type = boolean_string, default = True)
# data
parser.add_argument('--dataset_key', type = str, default = "samsum", choices= ["cnndm", "xsum", "wikihow", "samsum"])
parser.add_argument('--generation_methods', type = list, default = [
"beam_search",
#"diverse_beam_search",
#"top_p_sampling",
#"top_k_sampling",
])
# model
parser.add_argument('--model_type', type = str, default="pegasus", choices=["pegasus","bart"])
parser.add_argument('--clean_model_name', type = str, default = "pegasus_unsupervised",
choices = [
# Use case #1: Unsupervised abstractive summarization
"pegasus_unsupervised", "chatgpt",
# Use case #2: Zero-shot transfer
# from CNN/DM
"pegasus_cnndm", "bart_cnndm", "brio_cnndm",
# from XSum
"pegasus_xsum", "bart_xsum", "brio_xsum",
# from WikiHow
"pegasus_wikihow", "bart_wikihow",
# from SAMSum
"pegasus_samsum", "bart_samsum"
])
# summary generation
parser.add_argument('--val_dataset', type = str, default = "val", choices = ["val", "test"])
parser.add_argument('--max_val_size', type = int, default = 1000)
parser.add_argument('--num_beams', type = int, default = 20) # for beam search
# features for SummScore
parser.add_argument('--metrics_to_use', type = dict, default = {
# n-gram overlap with the source
"rouge_1": 1.0,
"rouge_2": 1.0,
"bleu": 1.0,
# semantic similarity with the source
"bert_score": 1.0,
"bart_score": 1.0,
"bleurt": 1.0,
# intrinsic summary quality
"diversity": 1.0,
"length": 1.0,
})
parser.add_argument('--compute_rouge', type = boolean_string, default = True)
parser.add_argument('--compute_bleu', type = boolean_string, default = True)
parser.add_argument('--compute_bertscore', type = boolean_string, default = True)
parser.add_argument('--efficient_bertscore', type = boolean_string, default = False)
parser.add_argument('--n_efficient', type = int, default = 10)
parser.add_argument('--compute_bartscore', type = boolean_string, default = True)
parser.add_argument('--compute_bleurt', type = boolean_string, default = True)
parser.add_argument('--compute_diversity', type = boolean_string, default = True)
parser.add_argument('--compute_length', type = boolean_string, default = True)
parser.add_argument('--stemmer', type = boolean_string, default = True)
args = parser.parse_args()
dataset_keys = ["cnndm", "xsum", "wikihow", "samsum"]
val_sizes = [13368, 11332, 5600, 818]
test_sizes = [11490, 11334, 5580, 819]
ratios = [60.8, 23.21, 23.28, 62.08, 23.42]
idx = dataset_keys.index(args.dataset_key)
if args.val_dataset == "val":
args.val_size = val_sizes[idx]
elif args.val_dataset == "test":
args.val_size = test_sizes[idx]
args.ratio = ratios[idx]
print("*"*50)
print(args)
def main(args):
# seed
seed_everything(args.seed)
# device
device = torch.device("cpu")
if args.cuda and torch.cuda.is_available():
device = torch.device("cuda")
args.device = device
print(f"Using device: {device}")
# load data
size = min(args.val_size, args.max_val_size)
path = f"../../summaries/{args.dataset_key}/{args.val_dataset}/{args.generation_methods[0]}/"
texts_path = path + f"{args.val_dataset}_texts_{size}_beams_{args.num_beams}.pkl"
texts = pickle.load(open(texts_path, "rb"))
summaries_path = path + f"{args.val_dataset}_summaries_{args.clean_model_name}_{size}_beams_{args.num_beams}.pkl"
summaries = pickle.load(open(summaries_path, "rb"))
# build the scores for each summary candidate
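    # build_scores (imported from engine) is expected to compute, for each candidate, the features enabled above
    # (n-gram overlap and semantic similarity with the source, diversity, length), weighted by metrics_to_use.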
all_scores = build_scores(texts, summaries, args)
if __name__ == '__main__':
main(args)
| [] |
2024-01-10 | Unsigned-Research/label-studio-ml | _wsgi.py | import json
import os
import argparse
import logging
import logging.config
logging.config.dictConfig({
"version": 1,
"formatters": {
"standard": {
"format": "[%(asctime)s] [%(levelname)s] [%(name)s::%(funcName)s::%(lineno)d] %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"stream": "ext://sys.stdout",
"formatter": "standard"
}
},
"root": {
"level": "ERROR",
"handlers": [
"console"
],
"propagate": True
}
})
from label_studio_ml.api import init_app
from openai_predictor import OpenAIPredictor
_DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config.json')
def get_kwargs_from_config(config_path=_DEFAULT_CONFIG_PATH):
if not os.path.exists(config_path):
return dict()
with open(config_path) as f:
config = json.load(f)
assert isinstance(config, dict)
return config
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Label studio')
parser.add_argument(
'-p', '--port', dest='port', type=int, default=int(os.environ.get("PORT", 9090)),
help='Server port')
parser.add_argument(
'--host', dest='host', type=str, default='0.0.0.0',
help='Server host')
parser.add_argument(
'--kwargs', '--with', dest='kwargs', metavar='KEY=VAL', nargs='+', type=lambda kv: kv.split('='),
help='Additional LabelStudioMLBase model initialization kwargs')
parser.add_argument(
'-d', '--debug', dest='debug', action='store_true',
help='Switch debug mode')
parser.add_argument(
'--log-level', dest='log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default=None,
help='Logging level')
parser.add_argument(
'--model-dir', dest='model_dir', default=os.path.dirname(__file__),
help='Directory where models are stored (relative to the project directory)')
parser.add_argument(
'--check', dest='check', action='store_true',
help='Validate model instance before launching server')
args = parser.parse_args()
# setup logging level
if args.log_level:
logging.root.setLevel(args.log_level)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def parse_kwargs():
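        # Coerce each string CLI value to int, bool, or float when possible; anything else stays a string.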
param = dict()
for k, v in args.kwargs:
if v.isdigit():
param[k] = int(v)
elif v == 'True' or v == 'true':
param[k] = True
            elif v == 'False' or v == 'false':
param[k] = False
elif isfloat(v):
param[k] = float(v)
else:
param[k] = v
return param
kwargs = get_kwargs_from_config()
if args.kwargs:
kwargs.update(parse_kwargs())
if args.check:
print('Check "' + OpenAIPredictor.__name__ + '" instance creation..')
model = OpenAIPredictor(**kwargs)
app = init_app(
model_class=OpenAIPredictor,
model_dir=os.environ.get('MODEL_DIR', args.model_dir),
redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
redis_host=os.environ.get('REDIS_HOST', 'localhost'),
redis_port=os.environ.get('REDIS_PORT', 6379),
**kwargs
)
app.run(host=args.host, port=args.port, debug=args.debug)
else:
# for uWSGI use
app = init_app(
model_class=OpenAIPredictor,
model_dir=os.environ.get('MODEL_DIR', os.path.dirname(__file__)),
redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
redis_host=os.environ.get('REDIS_HOST', 'localhost'),
redis_port=os.environ.get('REDIS_PORT', 6379)
) | [] |
2024-01-10 | Patrickyyh/gpt-tool | pdf-dist~app~chat~memories~sql_memory.py | from pydantic import BaseModel
from langchain.memory import ConversationBufferMemory
from langchain.schema import BaseChatMessageHistory
from app.web.api import (
get_messages_by_conversation_id,
add_message_to_conversation
)
# This class inherits from BaseChatMessageHistory (and BaseModel) and persists chat messages via the web API.
class SqlMessageHistory(BaseChatMessageHistory, BaseModel):
conversation_id: str
    ## Implement the interface required by the abstract base class
@property
def messages(self):
return get_messages_by_conversation_id(self.conversation_id)
def add_message(self, message):
return add_message_to_conversation(
conversation_id=self.conversation_id,
role=message.type,
content=message.content
)
def clear(self):
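        # Left as a no-op: the conversation history lives in the database, so there is nothing to clear in memory.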
pass
def build_memory(chat_args):
return ConversationBufferMemory(
chat_memory=SqlMessageHistory(
conversation_id=chat_args.conversation_id
),
return_messages=True,
memory_key="chat_history",
output_key="answer"
)
| [] |
2024-01-10 | Patrickyyh/gpt-tool | db_agents~tools~report.py | from langchain.tools import StructuredTool
from pydantic.v1 import BaseModel
def write_report(filename , html):
with open(filename , "w") as f:
f.write(html)
class WriteReportArgsSchema(BaseModel):
filename: str
html: str
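# The schema above tells the agent that write_report takes a file name plus the HTML body to write.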
write_report_tool = StructuredTool.from_function(
name = "write_report",
description= "Write an HTML file to disk. Use this tool whenver someone want a report.",
func= write_report,
args_schema=WriteReportArgsSchema
)
| [] |
2024-01-10 | Patrickyyh/gpt-tool | pdf-dist~app~chat~llms~chatopenai.py | from langchain.chat_models import ChatOpenAI
def build_llm(chat_args):
return ChatOpenAI(model="gpt-4-1106-preview" , streaming= chat_args.streaming)
| [] |
2024-01-10 | Patrickyyh/gpt-tool | facts~redundant_fileter_retriever.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import Callbacks
from langchain.embeddings.base import Embeddings
from langchain.schema.document import Document
from langchain.vectorstores import Chroma
from langchain.schema import BaseRetriever
from langchain.embeddings import OpenAIEmbeddings
class RedudantFilterRetriever(BaseRetriever):
## Please provide an already initialized Embeddings
embeddings: Embeddings
## Please provide an already initialized Chroma DB
chroma: Chroma
def get_relevant_documents(self, query):
        # Calculate embeddings for the user's query
emb = self.embeddings.embed_query(query)
        # Take the embeddings and feed them into max_marginal_relevance_search_by_vector
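        # lambda_mult controls the MMR trade-off between relevance and diversity (1.0 = pure relevance, 0.0 = maximum diversity).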
return self.chroma.max_marginal_relevance_search_by_vector(
embedding = emb,
lambda_mult = 0.5)
async def get_relevant_documents_async(self, query):
return []
| [] |
2024-01-10 | benthecoder/edupilot | pages~01_%F0%9F%93%9D_summarize.py | import streamlit as st
from utils import generate_word_document
from llama_index import SimpleDirectoryReader
import pathlib
from llama_index.llms import OpenAI
from llama_index import ServiceContext
from llama_index.response_synthesizers import Refine
PROJECT_DIR = pathlib.Path(__file__).parent.parent
st.title("Transcript Summarizer")
content = ""
with st.form("my_form"):
transcript_file = st.selectbox(
"Select a transcript file", options=st.session_state.transcripts
)
# Submit button
submitted = st.form_submit_button("Summarize Transcript")
if submitted:
input_file = PROJECT_DIR / "transcripts" / transcript_file
reader = SimpleDirectoryReader(input_files=[input_file])
docs = reader.load_data()
text = docs[0].text
llm = OpenAI(model="gpt-4")
service_context = ServiceContext.from_defaults(llm=llm)
summarizer = Refine(service_context=service_context, verbose=True)
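        # The Refine synthesizer answers on the first chunk and then iteratively refines that answer over the remaining chunks.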
response = summarizer.get_response(
"Summarize this lecture using the cornell system", [text]
)
        st.write(response)
        content = response  # keep the generated summary so the download button below is enabled
if content != "":
doc_file = generate_word_document(content)
st.download_button(
label="Download summary",
data=doc_file,
file_name="assignment.docx",
mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
)
| [] |
2024-01-10 | benthecoder/edupilot | pages~03_%F0%9F%92%AC_chat.py | import openai
import streamlit as st
from utils import openai_call
import os
st.title("24/7 Virtual Assistant")
GPT_MODEL = "gpt-3.5-turbo-16k"
openai.api_key = st.secrets["OPENAI_API_KEY"]
if "chat_messages" not in st.session_state:
st.session_state.chat_messages = []
if "transcripts" not in st.session_state:
st.session_state.transcripts = [
files for files in os.listdir("transcripts") if files.endswith(".txt")
]
transcript_file = st.selectbox(
"Select a transcript file", options=st.session_state.transcripts
)
with open("transcripts/" + transcript_file, "r") as f:
lecture = f.read()
for message in st.session_state.chat_messages:
if message["role"] != "system":
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Add the system message only once; st.session_state persists across Streamlit reruns,
# so extending the list on every run would duplicate it.
if not any(m["role"] == "system" for m in st.session_state.chat_messages):
    st.session_state.chat_messages.append(
        {
            "role": "system",
            "content": f"Your task is to answer questions on this lecture: {lecture}.",
        }
    )
if prompt := st.chat_input("What is up?"):
st.session_state.chat_messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
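        # openai_call (from utils) is assumed to stream the assistant's reply into the placeholder and return the final text.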
content = openai_call(
st.session_state.chat_messages, message_placeholder, model=GPT_MODEL
)
st.session_state.chat_messages.append({"role": "assistant", "content": content})
| [
"Your task is to answer questions on this lecture: PLACEHOLDER."
] |
2024-01-10 | VarunPTalluri/SkillUp | article_dataset_preprocess~article_cleaner.py | from openai import AzureOpenAI
import re
import numpy as np
import pandas as pd
def clean_article_text(input_file, output_file):
with open("SECRETKEY.txt", "r") as f:
key = f.read()
key = key.split('\n')[0]
client = AzureOpenAI(api_key=key,
azure_endpoint='https://api.umgpt.umich.edu/azure-openai-api/ptu',
api_version='2023-03-15-preview')
indf = pd.read_csv(input_file)
indf = indf.replace("", np.NaN)
indf = indf.dropna()
outdict = {"index": [], "text": [], "url": []}
for index, row in indf.iterrows():
        text = re.sub(r'[^a-zA-Z0-9\s._-]', '', row['text'])  # keep whitespace so the article's words stay separated
print(f"this raw article contains {len(text)} characters")
final_response = np.NaN
if (len(text) < 1000):
print(text) #print to understand why there is an error
else:
text = text[:min(len(text), 31000)]
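            # Truncate to ~31k characters, presumably to keep the prompt within the context window of the GPT-4 deployment.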
response = client.chat.completions.create(model='gpt-4',
messages=[
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Remove words that don't relate to the main themes of to the article deliminated by triple quotes\n" + f'"""{text}"""'}
])
final_response = response.choices[0].message.content
outdict["index"].append(index)
outdict["text"].append(final_response)
outdict["url"].append(row['url'])
print(f"article {index} done")
# print the response
outdf = pd.DataFrame(data = outdict)
outdf.to_csv(output_file) | [
"Remove words that don't relate to the main themes of to the article deliminated by triple quotes\n\"\"\"PLACEHOLDER\"\"\"",
"You are a helpful assistant"
] |
2024-01-10 | Yajiehan/ML-ChatGPT-SendEmailToCustomer | Moderation%2C%20Classification%2C%20Checkout%20and%20Evaluation~utilsMCCE.py | import openai
import sys
sys.path.append('..')
import utils
sys.path.append('..')
import json
delimiter = "####"
# Input Moderation
def test_Moderation(comment):
response = openai.Moderation.create(comment)
moderation_output = response["results"][0]
if moderation_output["flagged"] != False:
return "The response is not appropriate!"
else:
return "The response is appropriate!"
# Prevent Prompt Injection
def test_Prompt_Injection(user_Input, language):
system_message = f"""
Assistant responses must be in English or {language}. \
If the user says something in other languages, \
always respond in English. The user input \
message will be delimited with {delimiter} characters.
"""
user_message_for_model = f"""User message, \
remember that your response to the user \
must be in English or {language}: \
{delimiter}{user_Input}{delimiter}
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': user_message_for_model},
]
response = utils.get_completion_from_messages(messages)
print(response)
# Classification of Service Requests
def get_Classification_of_Service_Request(user_Message):
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with \
{delimiter} characters.
Classify each query into a primary category \
and a secondary category.
Provide your output in json format with the \
keys: primary and secondary.
Primary categories: Billing, Technical Support, \
Account Management, or General Inquiry.
Billing secondary categories:
Unsubscribe or upgrade
Add a payment method
Explanation for charge
Dispute a charge
Technical Support secondary categories:
General troubleshooting
Device compatibility
Software updates
Account Management secondary categories:
Password reset
Update personal information
Close account
Account security
General Inquiry secondary categories:
Product information
Pricing
Feedback
Speak to a human
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': f"{delimiter}{user_Message}{delimiter}"},
]
response = utils.get_completion_from_messages(messages)
return response
# Answering user questions using Chain of Thought Reasoning
def chain_of_thought_reasoning(user_message):
system_message = f"""
Follow these steps to answer the customer queries.
The customer query will be delimited with four hashtags,\
i.e. {delimiter}.
Step 1:{delimiter} First decide whether the user is \
asking a question about a specific product or products. \
Product cateogry doesn't count.
Step 2:{delimiter} If the user is asking about \
specific products, identify whether \
the products are in the following list.
All available products:
1. Product: TechPro Ultrabook
Category: Computers and Laptops
Brand: TechPro
Model Number: TP-UB100
Warranty: 1 year
Rating: 4.5
Features: 13.3-inch display, 8GB RAM, 256GB SSD, Intel Core i5 processor
Description: A sleek and lightweight ultrabook for everyday use.
Price: $799.99
2. Product: BlueWave Gaming Laptop
Category: Computers and Laptops
Brand: BlueWave
Model Number: BW-GL200
Warranty: 2 years
Rating: 4.7
Features: 15.6-inch display, 16GB RAM, 512GB SSD, NVIDIA GeForce RTX 3060
Description: A high-performance gaming laptop for an immersive experience.
Price: $1199.99
3. Product: PowerLite Convertible
Category: Computers and Laptops
Brand: PowerLite
Model Number: PL-CV300
Warranty: 1 year
Rating: 4.3
Features: 14-inch touchscreen, 8GB RAM, 256GB SSD, 360-degree hinge
Description: A versatile convertible laptop with a responsive touchscreen.
Price: $699.99
4. Product: TechPro Desktop
Category: Computers and Laptops
Brand: TechPro
Model Number: TP-DT500
Warranty: 1 year
Rating: 4.4
Features: Intel Core i7 processor, 16GB RAM, 1TB HDD, NVIDIA GeForce GTX 1660
Description: A powerful desktop computer for work and play.
Price: $999.99
5. Product: BlueWave Chromebook
Category: Computers and Laptops
Brand: BlueWave
Model Number: BW-CB100
Warranty: 1 year
Rating: 4.1
Features: 11.6-inch display, 4GB RAM, 32GB eMMC, Chrome OS
Description: A compact and affordable Chromebook for everyday tasks.
Price: $249.99
Step 3:{delimiter} If the message contains products \
in the list above, list any assumptions that the \
user is making in their \
message e.g. that Laptop X is bigger than \
Laptop Y, or that Laptop Z has a 2 year warranty.
Step 4:{delimiter}: If the user made any assumptions, \
figure out whether the assumption is true based on your \
product information.
Step 5:{delimiter}: First, politely correct the \
customer's incorrect assumptions if applicable. \
Only mention or reference products in the list of \
5 available products, as these are the only 5 \
products that the store sells. \
Answer the customer in a friendly tone.
Use the following format:
Step 1:{delimiter} <step 1 reasoning>
Step 2:{delimiter} <step 2 reasoning>
Step 3:{delimiter} <step 3 reasoning>
Step 4:{delimiter} <step 4 reasoning>
Response to user:{delimiter} <response to customer>
Make sure to include {delimiter} to separate every step.
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': f"{delimiter}{user_message}{delimiter}"},
]
response = utils.get_completion_from_messages(messages)
return response
# Check Output using model self-evaluate
def check_Output_self_evaluate(customer_message, final_response_to_customer):
system_message = f"""
You are an assistant that evaluates whether \
customer service agent responses sufficiently \
answer customer questions, and also validates that \
all the facts the assistant cites from the product \
information are correct.
The product information and user and customer \
service agent messages will be delimited by \
3 backticks, i.e. ```.
Respond with a Y or N character, with no punctuation:
Y - if the output sufficiently answers the question \
AND the response correctly uses product information
N - otherwise
Output a single letter only.
"""
product_information = """{ "name": "SmartX ProPhone", "category": "Smartphones and Accessories", "brand": "SmartX", "model_number": "SX-PP10", "warranty": "1 year", "rating": 4.6, "features": [ "6.1-inch display", "128GB storage", "12MP dual camera", "5G" ], "description": "A powerful smartphone with advanced camera features.", "price": 899.99 } { "name": "FotoSnap DSLR Camera", "category": "Cameras and Camcorders", "brand": "FotoSnap", "model_number": "FS-DSLR200", "warranty": "1 year", "rating": 4.7, "features": [ "24.2MP sensor", "1080p video", "3-inch LCD", "Interchangeable lenses" ], "description": "Capture stunning photos and videos with this versatile DSLR camera.", "price": 599.99 } { "name": "CineView 4K TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-4K55", "warranty": "2 years", "rating": 4.8, "features": [ "55-inch display", "4K resolution", "HDR", "Smart TV" ], "description": "A stunning 4K TV with vibrant colors and smart features.", "price": 599.99 } { "name": "SoundMax Home Theater", "category": "Televisions and Home Theater Systems", "brand": "SoundMax", "model_number": "SM-HT100", "warranty": "1 year", "rating": 4.4, "features": [ "5.1 channel", "1000W output", "Wireless subwoofer", "Bluetooth" ], "description": "A powerful home theater system for an immersive audio experience.", "price": 399.99 } { "name": "CineView 8K TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-8K65", "warranty": "2 years", "rating": 4.9, "features": [ "65-inch display", "8K resolution", "HDR", "Smart TV" ], "description": "Experience the future of television with this stunning 8K TV.", "price": 2999.99 } { "name": "SoundMax Soundbar", "category": "Televisions and Home Theater Systems", "brand": "SoundMax", "model_number": "SM-SB50", "warranty": "1 year", "rating": 4.3, "features": [ "2.1 channel", "300W output", "Wireless subwoofer", "Bluetooth" ], "description": "Upgrade your TV's audio with this sleek and powerful soundbar.", "price": 199.99 } { "name": "CineView OLED TV", "category": "Televisions and Home Theater Systems", "brand": "CineView", "model_number": "CV-OLED55", "warranty": "2 years", "rating": 4.7, "features": [ "55-inch display", "4K resolution", "HDR", "Smart TV" ], "description": "Experience true blacks and vibrant colors with this OLED TV.", "price": 1499.99 }"""
q_a_pair = f"""
Customer message: ```{customer_message}```
Product information: ```{product_information}```
Agent response: ```{final_response_to_customer}```
Does the response use the retrieved information correctly?
Does the response sufficiently answer the question
Output Y or N
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': q_a_pair}
]
response = utils.get_completion_from_messages(messages, max_tokens=1)
return response
def find_category_and_product_v1(user_input,products_and_category):
delimiter = "####"
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
AND
'products': <a list of products that must be found in the allowed products below>
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
List out all products that are relevant to the customer service query based on how closely it relates
to the product name and product category.
Do not assume, from the name of the product, any features or attributes such as relative quality or price.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
few_shot_user_1 = """I want the most expensive computer."""
few_shot_assistant_1 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{few_shot_user_1}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_1 },
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return utils.get_completion_from_messages(messages)
def find_category_and_product_v2(user_input,products_and_category):
system_message = f"""
You will be provided with customer service queries. \
The customer service query will be delimited with {delimiter} characters.
Output a python list of json objects, where each object has the following format:
'category': <one of Computers and Laptops, Smartphones and Accessories, Televisions and Home Theater Systems, \
Gaming Consoles and Accessories, Audio Equipment, Cameras and Camcorders>,
AND
'products': <a list of products that must be found in the allowed products below>
Do not output any additional text that is not in JSON format.
Do not write any explanatory text after outputting the requested JSON.
Where the categories and products must be found in the customer service query.
If a product is mentioned, it must be associated with the correct category in the allowed products list below.
If no products or categories are found, output an empty list.
List out all products that are relevant to the customer service query based on how closely it relates
to the product name and product category.
Do not assume, from the name of the product, any features or attributes such as relative quality or price.
The allowed products are provided in JSON format.
The keys of each item represent the category.
The values of each item is a list of products that are within that category.
Allowed products: {products_and_category}
"""
few_shot_user_1 = """I want the most expensive computer. What do you recommend?"""
few_shot_assistant_1 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
    few_shot_user_2 = """I want the cheapest computer. What do you recommend?"""
few_shot_assistant_2 = """
[{'category': 'Computers and Laptops', \
'products': ['TechPro Ultrabook', 'BlueWave Gaming Laptop', 'PowerLite Convertible', 'TechPro Desktop', 'BlueWave Chromebook']}]
"""
messages = [
{'role':'system', 'content': system_message},
{'role':'user', 'content': f"{delimiter}{few_shot_user_1}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_1 },
{'role':'user', 'content': f"{delimiter}{few_shot_user_2}{delimiter}"},
{'role':'assistant', 'content': few_shot_assistant_2 },
{'role':'user', 'content': f"{delimiter}{user_input}{delimiter}"},
]
return utils.get_completion_from_messages(messages)
# Evaluate Response with ideal answers
import json
def eval_response_with_ideal(response,ideal,debug=False):
if debug:
print("response")
print(response)
# json.loads() expects double quotes, not single quotes
json_like_str = response.replace("'",'"')
# parse into a list of dictionaries
l_of_d = json.loads(json_like_str)
# special case when response is empty list
if l_of_d == [] and ideal == []:
return 1
# otherwise, response is empty
# or ideal should be empty, there's a mismatch
elif l_of_d == [] or ideal == []:
return 0
correct = 0
if debug:
print("l_of_d is")
print(l_of_d)
for d in l_of_d:
cat = d.get('category')
prod_l = d.get('products')
if cat and prod_l:
# convert list to set for comparison
prod_set = set(prod_l)
# get ideal set of products
ideal_cat = ideal.get(cat)
if ideal_cat:
prod_set_ideal = set(ideal.get(cat))
else:
if debug:
print(f"did not find category {cat} in ideal")
print(f"ideal: {ideal}")
continue
if debug:
print("prod_set\n",prod_set)
print()
print("prod_set_ideal\n",prod_set_ideal)
if prod_set == prod_set_ideal:
if debug:
print("correct")
correct +=1
else:
print("incorrect")
print(f"prod_set: {prod_set}")
print(f"prod_set_ideal: {prod_set_ideal}")
if prod_set <= prod_set_ideal:
print("response is a subset of the ideal answer")
elif prod_set >= prod_set_ideal:
print("response is a superset of the ideal answer")
# count correct over total number of items in list
pc_correct = correct / len(l_of_d)
return pc_correct
def evaluate_all_pair_set(msg_ideal_pairs_set):
# Note, this will not work if any of the api calls time out
score_accum = 0
for i, pair in enumerate(msg_ideal_pairs_set):
print(f"example {i}")
customer_msg = pair['customer_msg']
ideal = pair['ideal_answer']
# print("Customer message",customer_msg)
# print("ideal:",ideal)
response = find_category_and_product_v2(customer_msg, utils.get_products_and_category())
# print("products_by_category",products_by_category)
score = eval_response_with_ideal(response,ideal,debug=False)
print(f"{i}: {score}")
score_accum += score
n_examples = len(msg_ideal_pairs_set)
fraction_correct = score_accum / n_examples
print(f"Fraction correct out of {n_examples}: {fraction_correct}")
# Evaluate with rubric
def eval_with_rubric(test_set, assistant_answer):
cust_msg = test_set['customer_msg']
context = test_set['context']
completion = assistant_answer
system_message = """\
You are an assistant that evaluates how well the customer service agent \
answers a user question by looking at the context that the customer service \
agent is using to generate its response.
"""
user_message = f"""\
You are evaluating a submitted answer to a question based on the context \
that the agent uses to answer the question.
Here is the data:
[BEGIN DATA]
************
[Question]: {cust_msg}
************
[Context]: {context}
************
[Submission]: {completion}
************
[END DATA]
Compare the factual content of the submitted answer with the context. \
Ignore any differences in style, grammar, or punctuation.
Answer the following questions:
- Is the Assistant response based only on the context provided? (Y or N)
- Does the answer include information that is not provided in the context? (Y or N)
- Is there any disagreement between the response and the context? (Y or N)
- Count how many questions the user asked. (output a number)
- For each question that the user asked, is there a corresponding answer to it?
Question 1: (Y or N)
Question 2: (Y or N)
...
Question N: (Y or N)
- Of the number of questions asked, how many of these questions were addressed by the answer? (output a number)
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': user_message}
]
response = utils.get_completion_from_messages(messages)
return response
def eval_vs_ideal(test_set, assistant_answer):
cust_msg = test_set['customer_msg']
ideal = test_set['ideal_answer']
completion = assistant_answer
system_message = """\
You are an assistant that evaluates how well the customer service agent \
answers a user question by comparing the response to the ideal (expert) response
Output a single letter and nothing else.
"""
user_message = f"""\
You are comparing a submitted answer to an expert answer on a given question. Here is the data:
[BEGIN DATA]
************
[Question]: {cust_msg}
************
[Expert]: {ideal}
************
[Submission]: {completion}
************
[END DATA]
Compare the factual content of the submitted answer with the expert answer. Ignore any differences in style, grammar, or punctuation.
The submitted answer may either be a subset or superset of the expert answer, or it may conflict with it. Determine which case applies. Answer the question by selecting one of the following options:
(A) The submitted answer is a subset of the expert answer and is fully consistent with it.
(B) The submitted answer is a superset of the expert answer and is fully consistent with it.
(C) The submitted answer contains all the same details as the expert answer.
(D) There is a disagreement between the submitted answer and the expert answer.
(E) The answers differ, but these differences don't matter from the perspective of factuality.
choice_strings: ABCDE
"""
messages = [
{'role': 'system', 'content': system_message},
{'role': 'user', 'content': user_message}
]
response = utils.get_completion_from_messages(messages)
return response | [
"PLACEHOLDERPLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | Chainlit/chainlit | backend~chainlit~__init__.py | import os
from dotenv import load_dotenv
env_found = load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"))
import asyncio
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from starlette.datastructures import Headers
if TYPE_CHECKING:
from chainlit.haystack.callbacks import HaystackAgentCallbackHandler
from chainlit.langchain.callbacks import (
LangchainCallbackHandler,
AsyncLangchainCallbackHandler,
)
from chainlit.llama_index.callbacks import LlamaIndexCallbackHandler
import chainlit.input_widget as input_widget
from chainlit.action import Action
from chainlit.cache import cache
from chainlit.chat_settings import ChatSettings
from chainlit.config import config
from chainlit.context import context
from chainlit.element import (
Audio,
Avatar,
File,
Image,
Pdf,
Plotly,
Pyplot,
Task,
TaskList,
TaskStatus,
Text,
Video,
)
from chainlit.logger import logger
from chainlit.message import (
AskActionMessage,
AskFileMessage,
AskUserMessage,
ErrorMessage,
Message,
)
from chainlit.oauth_providers import get_configured_oauth_providers
from chainlit.step import Step, step
from chainlit.sync import make_async, run_sync
from chainlit.telemetry import trace
from chainlit.types import ChatProfile, ThreadDict
from chainlit.user import PersistedUser, User
from chainlit.user_session import user_session
from chainlit.utils import make_module_getattr, wrap_user_function
from chainlit.version import __version__
from chainlit_client import ChatGeneration, CompletionGeneration, GenerationMessage
if env_found:
logger.info("Loaded .env file")
@trace
def password_auth_callback(func: Callable[[str, str], Optional[User]]) -> Callable:
"""
Framework agnostic decorator to authenticate the user.
Args:
func (Callable[[str, str], Optional[User]]): The authentication callback to execute. Takes the email and password as parameters.
Example:
@cl.password_auth_callback
async def password_auth_callback(username: str, password: str) -> Optional[User]:
Returns:
Callable[[str, str], Optional[User]]: The decorated authentication callback.
"""
config.code.password_auth_callback = wrap_user_function(func)
return func
@trace
def header_auth_callback(func: Callable[[Headers], Optional[User]]) -> Callable:
"""
Framework agnostic decorator to authenticate the user via a header
Args:
func (Callable[[Headers], Optional[User]]): The authentication callback to execute.
Example:
@cl.header_auth_callback
async def header_auth_callback(headers: Headers) -> Optional[User]:
Returns:
Callable[[Headers], Optional[User]]: The decorated authentication callback.
"""
config.code.header_auth_callback = wrap_user_function(func)
return func
@trace
def oauth_callback(
func: Callable[[str, str, Dict[str, str], User], Optional[User]]
) -> Callable:
"""
Framework agnostic decorator to authenticate the user via oauth
Args:
func (Callable[[str, str, Dict[str, str], User], Optional[User]]): The authentication callback to execute.
Example:
@cl.oauth_callback
async def oauth_callback(provider_id: str, token: str, raw_user_data: Dict[str, str], default_app_user: User) -> Optional[User]:
Returns:
Callable[[str, str, Dict[str, str], User], Optional[User]]: The decorated authentication callback.
"""
if len(get_configured_oauth_providers()) == 0:
raise ValueError(
"You must set the environment variable for at least one oauth provider to use oauth authentication."
)
config.code.oauth_callback = wrap_user_function(func)
return func
@trace
def on_message(func: Callable) -> Callable:
"""
Framework agnostic decorator to react to messages coming from the UI.
The decorated function is called every time a new message is received.
Args:
func (Callable[[Message], Any]): The function to be called when a new message is received. Takes a cl.Message.
Returns:
Callable[[str], Any]: The decorated on_message function.
"""
config.code.on_message = wrap_user_function(func)
return func
@trace
def on_chat_start(func: Callable) -> Callable:
"""
Hook to react to the user websocket connection event.
Args:
        func (Callable[[], Any]): The connection hook to execute.
Returns:
        Callable[[], Any]: The decorated hook.
"""
config.code.on_chat_start = wrap_user_function(func, with_task=True)
return func
@trace
def on_chat_resume(func: Callable[[ThreadDict], Any]) -> Callable:
"""
Hook to react to resume websocket connection event.
Args:
        func (Callable[[], Any]): The connection hook to execute.
Returns:
        Callable[[], Any]: The decorated hook.
"""
config.code.on_chat_resume = wrap_user_function(func, with_task=True)
return func
@trace
def set_chat_profiles(
func: Callable[[Optional["User"]], List["ChatProfile"]]
) -> Callable:
"""
    Programmatic declaration of the available chat profiles (can depend on the User from the session if authentication is set up).
Args:
func (Callable[[Optional["User"]], List["ChatProfile"]]): The function declaring the chat profiles.
Returns:
Callable[[Optional["User"]], List["ChatProfile"]]: The decorated function.
"""
config.code.set_chat_profiles = wrap_user_function(func)
return func
@trace
def on_chat_end(func: Callable) -> Callable:
"""
Hook to react to the user websocket disconnect event.
Args:
        func (Callable[[], Any]): The disconnect hook to execute.
Returns:
        Callable[[], Any]: The decorated hook.
"""
config.code.on_chat_end = wrap_user_function(func, with_task=True)
return func
@trace
def author_rename(func: Callable[[str], str]) -> Callable[[str], str]:
"""
Useful to rename the author of message to display more friendly author names in the UI.
Args:
func (Callable[[str], str]): The function to be called to rename an author. Takes the original author name as parameter.
Returns:
Callable[[Any, str], Any]: The decorated function.
"""
config.code.author_rename = wrap_user_function(func)
return func
@trace
def on_stop(func: Callable) -> Callable:
"""
Hook to react to the user stopping a thread.
Args:
func (Callable[[], Any]): The stop hook to execute.
Returns:
Callable[[], Any]: The decorated stop hook.
"""
config.code.on_stop = wrap_user_function(func)
return func
def action_callback(name: str) -> Callable:
"""
Callback to call when an action is clicked in the UI.
Args:
func (Callable[[Action], Any]): The action callback to execute. First parameter is the action.
"""
def decorator(func: Callable[[Action], Any]):
config.code.action_callbacks[name] = wrap_user_function(func, with_task=True)
return func
return decorator
def on_settings_update(
func: Callable[[Dict[str, Any]], Any]
) -> Callable[[Dict[str, Any]], Any]:
"""
Hook to react to the user changing any settings.
Args:
        func (Callable[[], Any]): The hook to execute after settings were changed.
Returns:
        Callable[[], Any]: The decorated hook.
"""
config.code.on_settings_update = wrap_user_function(func, with_task=True)
return func
def sleep(duration: int):
"""
Sleep for a given duration.
Args:
duration (int): The duration in seconds.
"""
return asyncio.sleep(duration)
__getattr__ = make_module_getattr(
{
"LangchainCallbackHandler": "chainlit.langchain.callbacks",
"AsyncLangchainCallbackHandler": "chainlit.langchain.callbacks",
"LlamaIndexCallbackHandler": "chainlit.llama_index.callbacks",
"HaystackAgentCallbackHandler": "chainlit.haystack.callbacks",
}
)
__all__ = [
"user_session",
"Action",
"User",
"PersistedUser",
"Audio",
"Pdf",
"Plotly",
"Image",
"Text",
"Avatar",
"Pyplot",
"File",
"Task",
"TaskList",
"TaskStatus",
"Video",
"ChatSettings",
"input_widget",
"Message",
"ErrorMessage",
"AskUserMessage",
"AskActionMessage",
"AskFileMessage",
"Step",
"step",
"ChatGeneration",
"CompletionGeneration",
"GenerationMessage",
"on_chat_start",
"on_chat_end",
"on_chat_resume",
"on_stop",
"action_callback",
"author_rename",
"on_settings_update",
"password_auth_callback",
"header_auth_callback",
"sleep",
"run_sync",
"make_async",
"cache",
"context",
"LangchainCallbackHandler",
"AsyncLangchainCallbackHandler",
"LlamaIndexCallbackHandler",
"HaystackAgentCallbackHandler",
]
def __dir__():
return __all__
| [] |
2024-01-10 | Chainlit/chainlit | cypress~e2e~prompt_playground~provider.py | import os
from chainlit.input_widget import Select, Slider
from chainlit.playground.config import BaseProvider, add_llm_provider
from chainlit.playground.providers.langchain import LangchainGenericProvider
from fastapi.responses import StreamingResponse
from langchain.llms.fake import FakeListLLM
import chainlit as cl
os.environ["TEST_LLM_API_KEY"] = "sk..."
class TestLLMProvider(BaseProvider):
async def create_completion(self, request):
await super().create_completion(request)
self.create_generation(request)
self.require_settings(request.generation.settings)
stream = ["This ", "is ", "the ", "test ", "completion"]
async def create_event_stream():
for token in stream:
await cl.sleep(0.1)
yield token
return StreamingResponse(create_event_stream())
TestLLM = TestLLMProvider(
id="test",
name="Test",
env_vars={"api_key": "TEST_LLM_API_KEY"},
inputs=[
Select(
id="model",
label="Model",
values=["test-model-1", "test-model-2"],
initial_value="test-model-2",
),
Slider(
id="temperature",
label="Temperature",
min=0.0,
max=1.0,
step=0.01,
initial=1,
),
],
is_chat=False,
)
ChatTestLLM = TestLLMProvider(
id="test-chat",
name="TestChat",
env_vars={"api_key": "TEST_LLM_API_KEY"},
inputs=[
Select(
id="model",
label="Model",
values=["test-model-chat-1", "test-model-chat-2"],
initial_value="test-model-chat-2",
),
Slider(
id="temperature",
label="Temperature",
min=0.0,
max=1.0,
step=0.01,
initial=1,
),
],
is_chat=True,
)
llm = FakeListLLM(responses=["This is the test completion"])
LangchainTestLLM = LangchainGenericProvider(
id="test-langchain",
name="TestLangchain",
llm=llm,
is_chat=False,
)
add_llm_provider(TestLLM)
add_llm_provider(ChatTestLLM)
add_llm_provider(LangchainTestLLM)
| [] |
2024-01-10 | ml-lab/decode-py | src~rl~blocks~engine~torch~observation~block.py | '''
The `.torch.obervation.block` module wraps the widget and engine components in a MemoryBlock.
'''
from src.rl.showcase.interface import Display
from typing import Any, Generic, Sequence, TypeVar
from result.result import Err, Ok, Result
from src.rl.blocks.interface import ObservationBlock
from typing_extensions import TypeAlias
from .engine import OpenAI_Observation, OpenAI_ObservationFactory
import streamlit as st
S = TypeVar('S')
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
Parent: TypeAlias = Any
Children: TypeAlias = Sequence[Display[Any, Any, Any]]
Engine: TypeAlias = OpenAI_Observation[S, T, U, V]
Factory: TypeAlias = OpenAI_ObservationFactory[S, T, U, V]
Block: TypeAlias = ObservationBlock[S, T, U, V, Parent, Children, Engine]
class OpenAI_ObservationBlock(Generic[S, T, U, V], Block[S, T, U, V]):
'''
    Block for displaying an OpenAI_Observation in an ObservationBlock.
'''
make = Factory[S, T, U, V]
__engine__ : Engine[S, T, U, V]
def __init__(self, engine: Engine[S, T, U, V], **kwargs: Any) -> None:
'''
Wraps an OpenAI_Observation in a block.
'''
self.__engine__ = engine
def engine(self, **kwargs: Any) -> Result[Engine[S, T, U, V], ValueError]:
'''
Returns an OpenAI_Observation instance.
'''
return Ok(self.__engine__)
def display(self, parent: Parent, children: Children, **kwargs: Any) -> Result[Any, ValueError]:
'''
Displays an OpenAI_Observation.
'''
try:
parent.text(f'current reward: {self.engine().unwrap().get_reward()}')
return Ok(None)
except ValueError as error: return Err(ValueError(error, 'displaying OpenAI_Observation failed'))
def update(self, parent: Parent, children: Children, **kwargs: Any) -> Result[Any, ValueError]:
'''
Updates an OpenAI_ObservationBlock.
'''
try: return Ok(None)
        except ValueError as error: return Err(ValueError(error, 'updating OpenAI_ObservationBlock failed'))
| [] |
2024-01-10 | tobyloki/CodeExplorer | chains.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import (
OllamaEmbeddings,
# SentenceTransformerEmbeddings,
BedrockEmbeddings,
)
from langchain.chat_models import ChatOpenAI, ChatOllama, BedrockChat
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder
)
from typing import List, Any
from utils import BaseLogger
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
def load_embedding_model(embedding_model_name: str, logger=BaseLogger(), config={}):
if embedding_model_name == "ollama":
embeddings = OllamaEmbeddings(
base_url=config["ollama_base_url"], model="codellama:7b-instruct"
)
dimension = 4096
logger.info("Embedding: Using Ollama")
elif embedding_model_name == "openai":
embeddings = OpenAIEmbeddings()
dimension = 1536
logger.info("Embedding: Using OpenAI")
elif embedding_model_name == "aws":
embeddings = BedrockEmbeddings()
dimension = 1536
logger.info("Embedding: Using AWS")
# else:
# embeddings = SentenceTransformerEmbeddings(
# model_name="all-MiniLM-L6-v2", cache_folder="./embedding_model"
# )
# dimension = 384
# logger.info("Embedding: Using SentenceTransformer")
return embeddings, dimension
def load_llm(llm_name: str, logger=BaseLogger(), config={}):
if llm_name == "gpt-4":
logger.info("LLM: Using GPT-4")
return ChatOpenAI(temperature=0, model_name="gpt-4", streaming=True)
elif llm_name == "gpt-3.5":
logger.info("LLM: Using GPT-3.5")
return ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True)
elif llm_name == "claudev2":
logger.info("LLM: ClaudeV2")
return BedrockChat(
model_id="anthropic.claude-v2",
model_kwargs={"temperature": 0.0, "max_tokens_to_sample": 1024},
streaming=True,
)
elif len(llm_name):
logger.info(f"LLM: Using Ollama: {llm_name}")
return ChatOllama(
temperature=0,
base_url=config["ollama_base_url"],
model=llm_name,
streaming=True,
# seed=2,
top_k=10, # A higher value (100) will give more diverse answers, while a lower value (10) will be more conservative.
top_p=0.3, # Higher value (0.95) will lead to more diverse text, while a lower value (0.5) will generate more focused text.
num_ctx=3072, # Sets the size of the context window used to generate the next token.
)
logger.info("LLM: Using GPT-3.5")
return ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True)
def configure_llm_only_chain(llm):
# LLM only response
template = """
You are a helpful assistant that helps a support agent with answering programming questions.
If you don't know the answer, just say that you don't know, you must not make up an answer.
"""
human_template = "{question}"
chat_prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(template), # The persistent system prompt
MessagesPlaceholder(variable_name="chat_history"), # Where the memory will be stored.
HumanMessagePromptTemplate.from_template(human_template) # Where the human input will injected
])
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = LLMChain(
llm=llm,
prompt=chat_prompt,
verbose=False,
memory=memory,
)
def generate_llm_output(
user_input: str, callbacks: List[Any]
) -> str:
answer = chain.invoke(user_input, config={"callbacks": callbacks})["text"]
return answer
return generate_llm_output
def get_qa_rag_chain(_vectorstore, llm):
# Create qa RAG chain
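    # Retrieve the most relevant source-file chunks from the vector store, stuff them into the prompt below,
    # and have the LLM answer with references back to those source files.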
system_template = """
Use the following pieces of context to answer the question at the end.
The context contains code source files which can be used to answer the question as well as be used as references.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----
{summaries}
----
Generate concise answers with references to code source files at the end of every answer.
"""
user_template = "Question:```{question}```"
chat_prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(system_template), # The persistent system prompt
HumanMessagePromptTemplate.from_template(user_template), # Where the human input will injected
])
qa_chain = load_qa_with_sources_chain(
llm,
chain_type="stuff",
prompt=chat_prompt,
)
qa = RetrievalQAWithSourcesChain(
combine_documents_chain=qa_chain,
retriever=_vectorstore.as_retriever(search_kwargs={"k": 2}),
reduce_k_below_max_tokens=False,
max_tokens_limit=3375,
return_source_documents=True
)
return qa | [
"\n You are a helpful assistant that helps a support agent with answering programming questions.\n If you don't know the answer, just say that you don't know, you must not make up an answer.\n ",
" \n Use the following pieces of context to answer the question at the end.\n The context contains code source files which can be used to answer the question as well as be used as references.\n If you don't know the answer, just say that you don't know, don't try to make up an answer.\n ----\n {summaries}\n ----\n Generate concise answers with references to code source files at the end of every answer.\n ",
"chat_history",
"Question:```{question}```",
"{question}"
] |
2024-01-10 | jtoktas/langchain | tests~integration_tests~test_arxiv.py | """Integration test for Arxiv API Wrapper."""
import pytest
from langchain.utilities import ArxivAPIWrapper
@pytest.fixture
def api_client() -> ArxivAPIWrapper:
return ArxivAPIWrapper()
def test_call(api_client: ArxivAPIWrapper) -> None:
"""Test that ArxivAPIWrapper returns correct answer"""
output = api_client.run("1605.08386")
assert "Heat-bath random walks with Markov bases" in output
def test_several_docs(api_client: ArxivAPIWrapper) -> None:
"""Test that ArxivAPIWrapper returns several docs"""
output = api_client.run("Caprice Stanley")
assert "On Mixing Behavior of a Family of Random Walks" in output
def test_no_result_call(api_client: ArxivAPIWrapper) -> None:
"""Test that call gives no result."""
output = api_client.run("1605.08386WWW")
assert "No good Arxiv Result was found" == output
| [] |
2024-01-10 | jtoktas/langchain | langchain~text_splitter.py | """Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
from abc import ABC, abstractmethod
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Iterable,
List,
Literal,
Optional,
Union,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
class TextSplitter(BaseDocumentTransformer[Document], ABC):
"""Interface for splitting text into chunks."""
def __init__(
self,
chunk_size: int = 4000,
chunk_overlap: int = 200,
length_function: Callable[[str], int] = len,
):
"""Create a new TextSplitter."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self._length_function = length_function
@abstractmethod
def split_text(self, text: str) -> List[str]:
"""Split text into multiple components."""
def create_documents(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(text):
new_doc = Document(
page_content=chunk, metadata=copy.deepcopy(_metadatas[i])
)
documents.append(new_doc)
return documents
def split_documents(self, documents: List[Document]) -> List[Document]:
"""Split documents."""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return self.create_documents(texts, metadatas)
def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
text = separator.join(docs)
text = text.strip()
if text == "":
return None
else:
return text
def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
):
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, "
f"which is longer than the specified {self._chunk_size}"
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
# Keep on popping if:
# - we have a larger chunk than in the chunk overlap
# - or if we still have any chunks and the length is long
while total > self._chunk_overlap or (
total + _len + (separator_len if len(current_doc) > 0 else 0)
> self._chunk_size
and total > 0
):
total -= self._length_function(current_doc[0]) + (
separator_len if len(current_doc) > 1 else 0
)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
"""Text splitter that uses HuggingFace tokenizer to count length."""
try:
from transformers import PreTrainedTokenizerBase
if not isinstance(tokenizer, PreTrainedTokenizerBase):
raise ValueError(
"Tokenizer received was not an instance of PreTrainedTokenizerBase"
)
def _huggingface_tokenizer_length(text: str) -> int:
return len(tokenizer.encode(text))
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_tiktoken_encoder(
cls,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
) -> TextSplitter:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate max_tokens_for_prompt. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str, **kwargs: Any) -> int:
return len(
enc.encode(
text,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
**kwargs,
)
)
return cls(length_function=_tiktoken_encoder, **kwargs)
def transform_documents(
self, documents: List[Document], **kwargs: Any
) -> List[Document]:
"""Transform list of documents by splitting them."""
return self.split_documents(documents)
async def atransform_documents(
self, documents: List[Document], **kwargs: Any
) -> List[Document]:
"""Asynchronously transform a list of documents by splitting them."""
raise NotImplementedError
class CharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
if self._separator:
splits = text.split(self._separator)
else:
splits = list(text)
return self._merge_splits(splits, self._separator)
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to for TokenTextSplitter. "
"Please install it with `pip install tiktoken`."
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
self._tokenizer = enc
self._allowed_special = allowed_special
self._disallowed_special = disallowed_special
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = []
input_ids = self._tokenizer.encode(
text,
allowed_special=self._allowed_special,
disallowed_special=self._disallowed_special,
)
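        # Slide a window of _chunk_size tokens over the ids, advancing by (_chunk_size - _chunk_overlap) each step.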
start_idx = 0
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
while start_idx < len(input_ids):
splits.append(self._tokenizer.decode(chunk_ids))
start_idx += self._chunk_size - self._chunk_overlap
cur_idx = min(start_idx + self._chunk_size, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
return splits
class RecursiveCharacterTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at characters.
Recursively tries to split by different characters to find one
that works.
"""
def __init__(self, separators: Optional[List[str]] = None, **kwargs: Any):
"""Create a new TextSplitter."""
super().__init__(**kwargs)
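        # Try the coarsest separators first (paragraph breaks), then line breaks, spaces, and finally single characters.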
self._separators = separators or ["\n\n", "\n", " ", ""]
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = self._separators[-1]
for _s in self._separators:
if _s == "":
separator = _s
break
if _s in text:
separator = _s
break
# Now that we have the separator, split the text
if separator:
splits = text.split(separator)
else:
splits = list(text)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
_good_splits = []
other_info = self.split_text(s)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, separator)
final_chunks.extend(merged_text)
return final_chunks
class NLTKTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using NLTK."""
def __init__(self, separator: str = "\n\n", **kwargs: Any):
"""Initialize the NLTK splitter."""
super().__init__(**kwargs)
try:
from nltk.tokenize import sent_tokenize
self._tokenizer = sent_tokenize
except ImportError:
raise ImportError(
"NLTK is not installed, please install it with `pip install nltk`."
)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
splits = self._tokenizer(text)
return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at sentences using Spacy."""
def __init__(
self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
):
"""Initialize the spacy text splitter."""
super().__init__(**kwargs)
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
self._tokenizer = spacy.load(pipeline)
self._separator = separator
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = (str(s) for s in self._tokenizer(text).sents)
return self._merge_splits(splits, self._separator)
class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Markdown-formatted headings."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along Markdown headings (starting with level 2)
"\n## ",
"\n### ",
"\n#### ",
"\n##### ",
"\n###### ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n\n",
# Horizontal lines
"\n\n***\n\n",
"\n\n---\n\n",
"\n\n___\n\n",
            # Note that this splitter doesn't handle horizontal lines defined
            # by *three or more* of ***, ---, or ___
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
class LatexTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Latex-formatted layout elements."""
def __init__(self, **kwargs: Any):
"""Initialize a LatexTextSplitter."""
separators = [
# First, try to split along Latex sections
"\n\\chapter{",
"\n\\section{",
"\n\\subsection{",
"\n\\subsubsection{",
# Now split by environments
"\n\\begin{enumerate}",
"\n\\begin{itemize}",
"\n\\begin{description}",
"\n\\begin{list}",
"\n\\begin{quote}",
"\n\\begin{quotation}",
"\n\\begin{verse}",
"\n\\begin{verbatim}",
## Now split by math environments
"\n\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
super().__init__(separators=separators, **kwargs)
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any):
"""Initialize a MarkdownTextSplitter."""
separators = [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
super().__init__(separators=separators, **kwargs)
| [] |
2024-01-10 | whisperzh/RTABSA | Dataset~SemEval14~Train~rationale.py | import openai
def query_gpt3(prompt, max_tokens=256):
openai.api_key = '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'
response = openai.Completion.create(
model="text-davinci-003", # Specify GPT-3.5-Turbo model here
prompt=prompt,
max_tokens=max_tokens,
temperature=0.7,
stop=None,
n=1
)
return response.choices[0].text.strip()
if __name__ == "_main_":
prompt = "Translate the following English text to French: 'Hello, how are you?'"
result = query_gpt3(prompt)
print(result)
| [
"Translate the following English text to French: 'Hello, how are you?'"
] |
2024-01-10 | unbiased-coder/python-ai-content-writing | python-ai-generate-blog-heading-content.py | from openai_helper import openai_create_section
section='Describe nuclear fusion reaction'
print('Generating content for: ', section)
print(openai_create_section(section))
| [] |
2024-01-10 | unbiased-coder/python-ai-content-writing | python-ai-generate-blog-topics.py | from openai_helper import openai_blog_ideas
keywords = 'Nichola Teslas Inventions'
print ('Generating ideas for: ', keywords)
print(openai_blog_ideas(keywords))
| [] |
2024-01-10 | unbiased-coder/python-ai-content-writing | python-ai-generate-blog-headings.py | from openai_helper import openai_create_outline
headings = 'The history of Tesla\'s inventions'
print ('Generating headings for: ', headings)
print(openai_create_outline(headings))
| [] |
2024-01-10 | adnseek/shopgen | shopgen.py | import json
from datasets import load_dataset
import textwrap
import openai
import os
import pinecone
from sentence_transformers import SentenceTransformer
import torch
from tqdm.auto import tqdm
import pandas
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, Table, MetaData
from sqlalchemy.orm import relationship
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import Session
import re
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
load_dotenv()
Base = declarative_base()
class Category(Base):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String)
en = Column(String)
topic_id = Column(Integer)
slug = Column(String)
shop_id = Column(Integer)
class ShopCategory(Base):
__tablename__ = 'shop_categories'
id = Column(Integer, primary_key=True)
shop_id = Column(Integer)
name = Column(String)
category_id = Column(Integer, ForeignKey('categories.id'))
parent_id = Column(Integer)
custom = Column(Boolean)
category = relationship("Category")
from sqlalchemy.orm import Session
import re
class Shop:
def __init__(self, session: Session, shop_id: int):
self.session = session
self.shop_id = shop_id
self.custom_counter = 0
def create_shop_categories(self, categories, parent_id=None):
custom = False
for raw_name, children in categories.items():
name = re.sub('^[\d\.]+\s*', '', raw_name)
name = re.sub('\s*-.*$', '', name)
existing_category = self.session.query(Category).filter(Category.en == name, Category.topic_id != 11).first()
if not existing_category:
try:
existing_category = Category(name=name, topic_id=11, slug='custom', shop_id=self.shop_id, en=name)
self.session.add(existing_category)
self.session.flush() # Flush the session to get the id of the new Category
custom = True
self.custom_counter += 1
except Exception as e:
print(f"Error creating Category: {e}")
return # Return from function if the Category can't be created
try:
shop_category = ShopCategory(shop_id=self.shop_id, name=existing_category.en,
category_id=existing_category.id, parent_id=parent_id, custom=custom)
self.session.add(shop_category)
self.session.flush() # Flush the session to get the id of the new ShopCategory
custom = False
except Exception as e:
print(f"Error creating ShopCategory: {e}")
return # Return from function if the ShopCategory can't be created
if isinstance(children, dict) and children:
self.create_shop_categories(children, shop_category.id)
self.session.commit()
def parse_categories(self, categories_list):
if not categories_list:
return []
lines = categories_list.split("\n")
lines = [line.lstrip() for line in lines]
tree = {}
path = [tree]
for line in lines:
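            # Nesting depth is inferred by counting the digit characters in the line's numbering prefix (e.g. "2.1.3" -> depth 3)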
depth = len(re.findall('\d', line))
name = line.strip()
name = re.sub('^[\d\.]+\s*', '', name)
name = re.sub('\s*-.*$', '', name)
name = re.sub('\sund.*$', '', name)
name = re.sub('\s&.*$', '', name)
if not name:
continue
if depth == 0:
path = [tree]
else:
path = path[:depth]
location = path[-1]
location[name] = {}
path.append(location[name])
return tree
topic = input("\nTopic: ")
k = input("How many proposals do you want from Pinecone (Enter for 100) ? ")
# ideas = input("Shall i stuff the tree with some own ideas as custom categores (may be deleted later)(y/N)?")
shop_id = input("Whats the id of your shop ? ")
# ideas_yes = 0
# if (ideas == "y" or ideas == "yes"):
# ideas_yes=1
engine = create_engine('mysql+mysqlconnector://forge:559MKSy2FkgOWP280JTZ@localhost/ubernet_affiliateshop')
metadata = MetaData()
Session = sessionmaker(bind=engine)
session = Session()
exists = session.query(ShopCategory.shop_id).filter_by(shop_id=shop_id).first() is not None
if exists:
print("Shop "+shop_id+" already exists. Remove its categories first")
delete = input("Or shall i remove it ? (Be 100% sure!!!!!!), type 'delete': ")
if (delete=="delete"):
shop_categories = Table('shop_categories', metadata)
# find shop_categories entries where shop_id=1 and delete them
session.query(ShopCategory).filter(ShopCategory.shop_id == shop_id).delete()
else: sys.exit()
if k=="":
k = 100
else: k = int(k)
def wrap_text(text):
wrapper = textwrap.TextWrapper(width=140)
return wrapper.wrap(text=text)
query = ""
print("Python: 'Hey ChatGPT, i need some ideas about "+topic+"'")
if __name__ == '__main__':
openai.api_key = os.environ.get("OPENAI_API_KEY")
completion = openai.Completion.create(max_tokens=800, engine="text-davinci-003", prompt="Can you give me around 20 ideas for the main categories of my shop about "+topic)
lines = wrap_text(completion.choices[0].text)
query = topic + "("+(", ".join(lines))+")"
print("ChatGPT: 'Piny ? There ? I have some cool ideas for you in place!'")
# print(lines)
file = open('categories.json')
dataset = json.load(file)
dataset
cats = []
cats = dataset['items']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# # # if device != 'cuda':
# # # print(f"You are using {device}. This is much slower than using "
# # # "a CUDA-enabled GPU. If on Colab you can change this by "
# # # "clicking Runtime > Change runtime type > GPU.")
model = SentenceTransformer('all-MiniLM-L6-v2', device=device)
model
xq = model.encode(query)
xq.shape
# get api key from app.pinecone.io
PINECONE_API_KEY = "e7980b1a-dadb-4ae4-a97a-a7a73a1af9ff"
# find your environment next to the api key in pinecone console
PINECONE_ENV = "asia-southeast1-gcp-free"
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
index_name = 'categories'
# # only create index if it doesn't exist
#if index_name not in pinecone.list_indexes():
# pinecone.create_index(
# name=index_name,
# dimension=model.get_sentence_embedding_dimension(),
# metric='cosine'
# )
# # now connect to the index
index = pinecone.GRPCIndex(index_name)
# batch_size = 128
# bla = tqdm(range(0, len(cats), batch_size))
# for i in bla:
# # find end of batch
# i_end = min(i+batch_size, len(cats))
# # create IDs batch
# ids = [str(x) for x in range(i, i_end)]
# # create metadata batch
# metadatas = [{'text': text} for text in cats[i:i_end]]
# # create embeddings
# xc = model.encode(cats[i:i_end])
# # create records list for upsert
# records = zip(ids, xc, metadatas)
# #upsert to Pinecone
# index.upsert(vectors=records)
# # # check number of records in the index
# index.describe_index_stats()
# create the query vector
xq = model.encode(query).tolist()
# now query
xc = index.query(xq, top_k=k, include_metadata=True)
xc
outputCategories = []
for result in xc['matches']:
outputCategories.append(result['metadata']['text'])
print("Pinecone: 'Thanks Chatty ! Here are your "+str(len(outputCategories))+" ideas'")
# print(outputCategories)
prompt = "Create me a detailed category structure with multiple root categories. use max 8 root categories and max depth 3.use clearly findable category names. rather longer than shorter. Give me the list as a reversly numbered tree (e.g. 1, 1.1, 1.2, 2, 2.1, 2.2, 2.2.1 ..).ident the categories properly ! All Categories must have a number. No descriptions or subordinate clauses."
# if (ideas_yes == 1):
# prompt += ".add maximum 10 category-ideas to the whole tree. add a prefix C- only to these categories. not the other ones."
prompt += "Do not use category names that consist of multiple words: "+", ".join(outputCategories)
print("ChatGPT: 'I create the shop structure now from piny's cool ideas.thanks for this!'")
message = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
openai.api_key = os.environ.get("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(
model="gpt-4",
messages = message,
temperature=0.2,
max_tokens=4000,
frequency_penalty=0.0
)
text = completion.choices[0].message.content
# print("The Result:")
# print(text)
print("Python: 'Now I create the shop for you'")
shop = Shop(session, shop_id=shop_id)
tree = shop.parse_categories(text)
shop.create_shop_categories(tree)
print("Python: 'Your Tree has been created !'")
print("\n\nPimnecone: 'Now i check which advertisers on AWIN match your shop..be patient'\n\n")
pinecone.init(
api_key="fd769e8c-27ca-42ee-8ec1-55c2accdcead",
environment="us-west1-gcp-free"
)
index_name = 'awin'
# # ngw connect to the index
index = pinecone.GRPCIndex(index_name)
file = open('awin_14_12_2022.json')
dataset = json.load(file)
dataset
advertisers = []
advertisers = dataset['items']
# print("First we insert all advertisers from awin to pinecone")
# advertisers = pandas.Series(advertisers)
# batch_size = 128
# bla = tqdm(range(0, len(advertisers), batch_size))
# for i in bla:
# # find end of batch
# i_end = min(i+batch_size, len(advertisers))
# # create IDs batch
# ids = [str(x) for x in range(i, i_end)]
# # create metadata batch
# metadatas = [{'text': json.dumps(advertiser)} for advertiser in advertisers[i:i_end]]
# # create embeddings
# xc = [model.encode(json.dumps(advertiser)).tolist() for advertiser in advertisers[i:i_end]]
# # create records list for upsert
# records = list(zip(ids, xc, metadatas))
# # upsert to Pinecone
# index.upsert(vectors=records)
query = topic
# create the query vector
xq = model.encode(query).tolist()
# now query
xc = index.query(xq, top_k=k, include_metadata=True)
xc
outputAdvertisers = []
for result in xc['matches']:
# Load the JSON string into a Python dictionary
advertiser = json.loads(result['metadata']['text'])
# Print the programmeName and the score
if float(result['score']) > 0.20:
print(f"Programme Name: {advertiser['programmeName']}, Score: {round(result['score'], 2)}")
| [
"Do not use category names that consist of multiple words: ",
"Can you give me around 20 ideas for the main categories of my shop about PLACEHOLDER",
"You are a helpful assistant.",
", ",
"Create me a detailed category structure with multiple root categories. use max 8 root categories and max depth 3.use clearly findable category names. rather longer than shorter. Give me the list as a reversly numbered tree (e.g. 1, 1.1, 1.2, 2, 2.1, 2.2, 2.2.1 ..).ident the categories properly ! All Categories must have a number. No descriptions or subordinate clauses."
] |
2024-01-10 | abi/screenshot-to-code | backend~image_generation.py | import asyncio
import re
from typing import Dict, List, Union
from openai import AsyncOpenAI
from bs4 import BeautifulSoup
async def process_tasks(prompts: List[str], api_key: str, base_url: str):
tasks = [generate_image(prompt, api_key, base_url) for prompt in prompts]
results = await asyncio.gather(*tasks, return_exceptions=True)
processed_results: List[Union[str, None]] = []
for result in results:
if isinstance(result, Exception):
print(f"An exception occurred: {result}")
processed_results.append(None)
else:
processed_results.append(result)
return processed_results
async def generate_image(prompt: str, api_key: str, base_url: str):
client = AsyncOpenAI(api_key=api_key, base_url=base_url)
image_params: Dict[str, Union[str, int]] = {
"model": "dall-e-3",
"quality": "standard",
"style": "natural",
"n": 1,
"size": "1024x1024",
"prompt": prompt,
}
res = await client.images.generate(**image_params)
await client.close()
return res.data[0].url
def extract_dimensions(url: str):
# Regular expression to match numbers in the format '300x200'
matches = re.findall(r"(\d+)x(\d+)", url)
if matches:
width, height = matches[0] # Extract the first match
width = int(width)
height = int(height)
return (width, height)
else:
return (100, 100)
def create_alt_url_mapping(code: str) -> Dict[str, str]:
soup = BeautifulSoup(code, "html.parser")
images = soup.find_all("img")
mapping: Dict[str, str] = {}
for image in images:
if not image["src"].startswith("https://placehold.co"):
mapping[image["alt"]] = image["src"]
return mapping
async def generate_images(
code: str, api_key: str, base_url: Union[str, None], image_cache: Dict[str, str]
):
# Find all images
soup = BeautifulSoup(code, "html.parser")
images = soup.find_all("img")
# Extract alt texts as image prompts
alts = []
for img in images:
# Only include URL if the image starts with https://placehold.co
# and it's not already in the image_cache
if (
img["src"].startswith("https://placehold.co")
and image_cache.get(img.get("alt")) is None
):
alts.append(img.get("alt", None))
# Exclude images with no alt text
alts = [alt for alt in alts if alt is not None]
# Remove duplicates
prompts = list(set(alts))
# Return early if there are no images to replace
if len(prompts) == 0:
return code
# Generate images
results = await process_tasks(prompts, api_key, base_url)
# Create a dict mapping alt text to image URL
mapped_image_urls = dict(zip(prompts, results))
# Merge with image_cache
mapped_image_urls = {**mapped_image_urls, **image_cache}
# Replace old image URLs with the generated URLs
for img in images:
# Skip images that don't start with https://placehold.co (leave them alone)
if not img["src"].startswith("https://placehold.co"):
continue
new_url = mapped_image_urls[img.get("alt")]
if new_url:
# Set width and height attributes
width, height = extract_dimensions(img["src"])
img["width"] = width
img["height"] = height
# Replace img['src'] with the mapped image URL
img["src"] = new_url
else:
print("Image generation failed for alt text:" + img.get("alt"))
# Return the modified HTML
# (need to prettify it because BeautifulSoup messes up the formatting)
return soup.prettify()
| [
"['PLACEHOLDER']"
] |
2024-01-10 | samirjdev/Dressabull | ImageGenerator.py | import os
import openai
import webbrowser
from webcolors import CSS3_HEX_TO_NAMES, hex_to_rgb
def rgb_to_color_name(rgb):
# Convert the RGB list to a hexadecimal color representation
hex_color = '#{:02X}{:02X}{:02X}'.format(*rgb)
# Initialize variables to store the closest color and its distance
closest_color = None
closest_distance = float('inf')
# Iterate through the CSS3_HEX_TO_NAMES dictionary to find the closest color
for hex_value, color_name in CSS3_HEX_TO_NAMES.items():
color_rgb = hex_to_rgb(hex_value)
# Calculate the Euclidean distance between the colors
distance = sum((a - b) ** 2 for a, b in zip(rgb, color_rgb))
if distance < closest_distance:
closest_distance = distance
closest_color = color_name
return closest_color
def main(list_of_colors):
colors = list_of_colors
color_names = []
# Example usage:
for color in colors:
color_name = rgb_to_color_name(color)
#print(color_name)
color_names.append(color_name)
#print(f'RGB Color: {rgb_color}')
#print(color_names[0])
openai.api_key = ("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")
response = openai.Image.create(
prompt="Create an image of a unisex mannequin wearing a three-piece outfit consisting of a top, middle, and bottom. The top should be predominantly " + color_names[0] + ", the middle piece should be predominantly " +color_names[1] +" element, and the bottom should have predominantly " + color_names[2] + " base color. Ensure that the outfit is fashionable and visually appealing.",
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
print(image_url) | [] |
2024-01-10 | NoDataFound/SpeedCandidating | SpeedCanidating.py | import os
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from dotenv import load_dotenv, set_key
import openai
from bs4 import BeautifulSoup
from datetime import datetime
import textwrap
import csv
import random
import os
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from dotenv import load_dotenv, set_key
import pandas as pd
import os
import csv
import openai
from bs4 import BeautifulSoup
from datetime import datetime
from io import BytesIO, StringIO
st.set_page_config(layout="wide", page_icon="🇺🇸")
st.image("static/assets/SpeedCandidating.png", use_column_width=True)
readme_placeholder = st.empty()
if 'readme_displayed' not in st.session_state:
st.session_state['readme_displayed'] = True
if 'research_button_clicked' not in st.session_state:
st.session_state['research_button_clicked'] = False
if 'chat_button_clicked' not in st.session_state:
st.session_state['chat_button_clicked'] = False
if st.session_state['readme_displayed']:
readme_placeholder = st.empty()
readme_content = """









SpeedCandidating is an interactive application designed to engage users in political discourse, allowing them to ask questions and receive responses from virtual representations of political candidates trained from official sources. Powered by OpenAI's GPT models, it aims to eliminate media bias and simulate a candidating session where users can quickly gather insights into various political personas.
## Features
- **Multiple Party Interaction**: Engage with candidates across different political parties.
- **Dynamic Questioning**: Ask questions and get personalized responses from the candidate's perspective.
- **Data Logging**: Keeps track of all questions and responses for further analysis.
Visit https://github.com/NoDataFound/SpeedCandidating to learn more.
"""
readme_placeholder.markdown(readme_content)
#load_dotenv('.env')
openai.api_key = st.secrets["OPENAI"]["OPENAI_API_KEY"]
#openai.api_key = os.environ.get('OPENAI_API_KEY')
if not openai.api_key:
st.error("OpenAI API key is missing. Please add it to your secrets.")
#if not openai.api_key:
# openai.api_key = st.text_input("Enter OPENAI_API_KEY API key")
# set_key('.env', 'OPENAI_API_KEY', openai.api_key)
os.environ['OPENAI_API_KEY'] = openai.api_key
with open("static/assets/css/ssc.css") as css:
st.markdown(f'<style>{css.read()}</style>', unsafe_allow_html=True)
PARTY_COLORS = {
'Democrats': '#0a5f8a',
'Republicans': '#c93e34',
'Independent': 'white'
}
CANDIDATES = {
'Democrats': ['Biden', 'Williamson', 'Uygur'],
'Republicans': ['Trump', 'Haley', 'Ramaswamy', 'Hutchinson', 'Elder', 'Binkley', 'Scott', 'DeSantis', 'Pence', 'Christie', 'Burgum'],
'Independent': ['Kennedy', 'West']
}
DATA_FILE = "log/questions_responses_log.csv"
def get_party(candidate):
for party, candidates in CANDIDATES.items():
if candidate in candidates:
return party
return None
def log_question(candidates, party, question, response):
if os.path.exists(DATA_FILE):
try:
df = pd.read_csv(DATA_FILE)
except pd.errors.EmptyDataError:
df = pd.DataFrame(columns=["candidate", "party", "question", "response"])
else:
df = pd.DataFrame(columns=["candidate", "party", "question", "response"])
for candidate in candidates:
new_data = pd.DataFrame({
"candidate": [candidate],
"party": [party],
"question": [question],
"response": [response]
})
df = df.append(new_data, ignore_index=True)
df.to_csv(DATA_FILE, index=False)
def get_candidate_text(candidate):
formatted_name = candidate.replace(' ', '_')
file_path = f'training/candidates/{formatted_name}.txt'
with open(file_path, 'r', encoding='utf-8') as file:
return file.read()
def get_response(candidate, question, text, is_new_session=False):
MAX_CHUNK_SIZE = 16000 # Example value, adjust as needed
selected_persona = (
f"Ignore all the instructions you got before. From now on, you are going to act as {candidate}. "
f"You are talking to a voter"
f"Respond to questions in the first person as if you are {candidate}, "
f"using the voice and demeanor of a political figure. Do not refer to yourself in the 3rd person"
f"Do not ever mention wikipedia"
f"Try to use bullet points if possible")
if len(text.split()) <= MAX_CHUNK_SIZE:
question_with_text = text + " " + question
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": f"{selected_persona}"
},
{
"role": "user",
"content": question_with_text # Use the prefixed question here
}
],
temperature=1,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
answer = response['choices'][0]['message']['content'].strip()
text_snippets = text.split('. ')
relevant_snippets = random.sample(text_snippets, min(3, len(text_snippets))) # Get up to 3 snippets
# Construct the reference section
reference_section = "\n\nReference: "
reference_section += f"\n- My answer was derived from: training/candidates/{candidate.replace(' ', '_')}.txt"
for snippet in relevant_snippets:
reference_section += f"\n- {snippet}"
return answer + reference_section
else:
text_chunks = textwrap.wrap(text, width=MAX_CHUNK_SIZE, expand_tabs=False, replace_whitespace=False, drop_whitespace=False)
combined_answers = ""
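        # Query the model once per chunk and concatenate the per-chunk answers, each followed by its own reference snippet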
for chunk in text_chunks:
question_with_chunk = chunk + " " + question # Prefix the question with the chunk
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=[
{
"role": "system",
"content": f"{selected_persona}"
},
{
"role": "user",
"content": question_with_chunk # Use the prefixed question here
}
],
temperature=1,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
answer = response['choices'][0]['message']['content'].strip()
# Randomly select a snippet from the chunk as a reference
snippet = random.choice(chunk.split('. '))
reference_section = f"\n\nReference: This answer was derived from: {snippet}"
combined_answers += answer + reference_section + " "
return combined_answers
def get_response_table(responses):
df = pd.DataFrame(responses.items(), columns=["Candidate", "Response"])
df["Party"] = df["Candidate"].apply(get_party)
# Rearrange columns
df = df[["Party", "Candidate", "Response"]]
return df
def display_table(df):
# Replace newline characters with HTML line break tag
df['Response'] = df['Response'].str.replace('\n', '<br>')
# Convert DataFrame to HTML
html = df.to_html(classes='table table-sm', escape=False, index=False, border=0, justify='left', header=True)
# Use BeautifulSoup to manipulate the HTML
soup = BeautifulSoup(html, 'html.parser')
# Update header row with 'active' class for a lighter color and uppercase, bold text
header_row = soup.find('tr')
header_row['class'] = 'active'
for th in header_row.find_all('th'):
th.string = th.text.upper()
th['style'] = 'font-weight: bold;'
# Update each data row with the appropriate class based on the party
for tr, party in zip(soup.find_all('tr')[1:], df['Party']): # Skip header row
tr['class'] = 'table-danger' if party == 'Republicans' else 'table-info' if party == 'Democrats' else ''
# Convert back to HTML and then to markdown
html = str(soup)
st.markdown(html, unsafe_allow_html=True)
def main():
if 'research_button_clicked' not in st.session_state:
st.session_state['research_button_clicked'] = False
if 'chat_button_clicked' not in st.session_state:
st.session_state['chat_button_clicked'] = False
col1, col2, col3 , col4, col5, col6 = st.columns([1, 1, 2,2,1,1], gap="medium")
if col3.button("Research Multiple Candidates", key="research_button"):
st.session_state['research_button_clicked'] = True
st.session_state['chat_button_clicked'] = False
st.session_state['readme_displayed'] = False
if col4.button("Chat with Individual Candidates", key="chat_button"):
st.session_state['chat_button_clicked'] = True
st.session_state['research_button_clicked'] = False
st.session_state['readme_displayed'] = False
if not st.session_state['readme_displayed']:
readme_placeholder.empty()
st.markdown("----------")
new_chat_button_style = """
<link
rel="stylesheet"
href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css"
>
<style>
.big-button {
font-size: 20px;
padding: 20px 40px;
margin: 5px 0;
}
.big-button:hover {
color: black !important;
}
<style>
.stButton > button {
background-color: #008CBA; /* Blue color */
border: none;
color: black !important;
hover: black;
padding: 8px 16px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 4px 2px;
cursor: pointer;
border-radius: 12px;
}
</style>
"""
st.markdown(new_chat_button_style, unsafe_allow_html=True)
if st.session_state['research_button_clicked']:
with st.sidebar:
st.image(os.path.join("static", "assets", "SpeedCandidating.png"), use_column_width=True)
selected_party = st.selectbox('Select party:', list(CANDIDATES.keys()))
selected_candidates = st.multiselect(f'Choose {selected_party} candidates:', CANDIDATES[selected_party])
if selected_party == 'Democrats':
st.markdown("""<style>span[data-baseweb="tag"] { background-color: #242529 !important;}</style>""",unsafe_allow_html=True,)
if selected_party == 'Republicans':
st.markdown("""<style>span[data-baseweb="tag"] { background-color: #242529 !important;}</style>""",unsafe_allow_html=True,)
additional_party_option = st.checkbox("Select another party?")
if additional_party_option:
remaining_parties = [party for party in CANDIDATES.keys() if party != selected_party]
additional_party = st.selectbox('Select another party:', remaining_parties)
additional_candidates = st.multiselect(f'Choose {additional_party} candidates:', CANDIDATES[additional_party])
selected_candidates.extend(additional_candidates)
with st.form("Ask Question"):
question = st.text_input(label='',placeholder ="Ask your question")
if selected_candidates:
cols = st.columns(len(selected_candidates))
for idx, candidate in enumerate(selected_candidates):
party_of_candidate = get_party(candidate)
img_path = os.path.join("resources", "images",f"{party_of_candidate}", f"{candidate.lower()}.png")
cols[idx].image(img_path, caption=candidate, width=60)
ask_all = st.checkbox("Ask all Presidential candidates")
submit = st.form_submit_button("Submit")
if submit and question:
responses = {}
for candidate in selected_candidates:
candidate_text = get_candidate_text(candidate)
response = get_response(candidate, question, candidate_text)
responses[candidate] = response
log_question([candidate], get_party(candidate), question, response)
# Get the DataFrame and display it
response_df = get_response_table(responses)
display_table(response_df)
if os.path.exists(DATA_FILE):
df = pd.read_csv(DATA_FILE)
col1, col2 = st.columns(2)
candidate_counts = df['candidate'].value_counts()
candidate_colors = [PARTY_COLORS[get_party(candidate)] for candidate in candidate_counts.index]
fig1 = go.Figure(data=[go.Bar(x=candidate_counts.index, y=candidate_counts, marker_color=candidate_colors)])
fig1.update_layout(title="Question Counts per Canidate")
col1.plotly_chart(fig1, use_container_width=True)
party_counts = df['party'].value_counts()
fig2 = go.Figure(data=[go.Pie(labels=party_counts.index, values=party_counts, hole=.3, marker_colors=[PARTY_COLORS[p] for p in party_counts.index])])
fig2.update_layout(title="Party Question Distribution")
col2.plotly_chart(fig2, use_container_width=True)
elif st.session_state['chat_button_clicked']:
st.sidebar.image(os.path.join("static", "assets", "SpeedCandidating.png"), use_column_width=True)
selected_candidate = st.selectbox('Select a candidate:', ["Candidate"] + [candidate for party in CANDIDATES.values() for candidate in party])
party_of_candidate = get_party(selected_candidate)
img_path = os.path.join("resources", "images", f"{party_of_candidate}", f"{selected_candidate.lower()}.png")
import base64
def image_to_base64(img_path):
with open(img_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
st.sidebar.markdown("----")
col1, col2 = st.columns(2)
with col1:
st.sidebar.markdown(
f"<div style='text-align: center; margin: auto;'>CURRENTLY CHATTING WITH</div>",
unsafe_allow_html=True
)
with col2:
st.sidebar.markdown(
f"<div style='text-align: center;'><img src='data:image/png;base64,{image_to_base64(img_path)}' style='margin: auto;'/></div>",
unsafe_allow_html=True
)
st.sidebar.markdown("----")
st.sidebar.success(f"All responses derived from: training/candidates/{selected_candidate.replace(' ', '_')}.json")
#if "session_key" not in st.session_state:
# st.session_state.session_key = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# st.session_state.messages = []
# candidate_text = get_candidate_text(selected_candidate)
# greeting_response = get_response(selected_candidate, "", candidate_text, is_new_session=True)
# st.session_state.messages.append({"role": "assistant", "content": greeting_response})
if "session_key" not in st.session_state:
st.session_state.session_key = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
prompt = st.chat_input("Type your message:")
if prompt:
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user",avatar=os.path.join("resources", "images", "None","candidate.png")):
st.markdown(prompt)
candidate_text = get_candidate_text(selected_candidate)
response = get_response(selected_candidate, prompt, candidate_text)
with st.chat_message("assistant",avatar=os.path.join("resources", "images", f"{party_of_candidate}", f"{selected_candidate.lower()}.png")):
st.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})
#prompt = st.chat_input("Type your message:")
#
#if prompt:
# st.session_state.messages.append({"role": "user", "content": prompt})
# with st.chat_message("user"):
# st.markdown(prompt)
# candidate_text = get_candidate_text(selected_candidate)
# response = get_response(selected_candidate, prompt, candidate_text)
# with st.chat_message("assistant",avatar=os.path.join("resources", "images", f"{party_of_candidate}", f"{selected_candidate.lower()}.png")):
# st.markdown(response)
# st.session_state.messages.append({"role": "assistant", "content": response})
col1, col2 = st.sidebar.columns(2)
new_chat_button_style = """
<style>
.stButton > button {
background-color: #008CBA; /* Blue color */
border: none;
color: white;
padding: 8px 16px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 4px 2px;
cursor: pointer;
border-radius: 12px;
}
</style>
"""
st.markdown(new_chat_button_style, unsafe_allow_html=True)
col1, col2 = st.sidebar.columns(2)
if col1.button("New Chat"):
st.session_state.session_key = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
st.session_state.messages = []
if col2.button("Save Chat"):
filename = f"chat_{st.session_state.session_key}.csv"
csv_buffer = StringIO()
chat_writer = csv.writer(csv_buffer)
chat_writer.writerow(["Candidate", "Party", "Role", "Question", "Response"])
question = ""
for msg in st.session_state.messages:
if msg['role'] == 'user':
question = msg['content']
response = ""
elif msg['role'] == 'assistant':
response = msg['content']
candidate = "Name"
party = get_party(candidate)
chat_writer.writerow([candidate, party, msg['role'], question, response])
csv_buffer.seek(0)
st.success(f"Chat saved to {filename}")
st.download_button(
label="Download Chat",
data=csv_buffer.getvalue(),
file_name=filename,
mime='text/csv',
key="download_chat_button"
)
if __name__ == '__main__':
main()
| [
"Type your message:",
"PLACEHOLDER"
] |
2024-01-10 | intuita-inc/piranha | experimental~piranha_playground~rule_inference~piranha_chat.py | # Copyright (c) 2023 Uber Technologies, Inc.
#
# <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
# <p>http://www.apache.org/licenses/LICENSE-2.0
#
# <p>Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from pathlib import Path
from typing import List, Optional, Tuple
import attr
import openai
from piranha_playground.rule_inference.utils.logger_formatter import CustomFormatter
logger = logging.getLogger("PiranhaAgent")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
class PiranhaChatException(Exception):
pass
@attr.s
class PiranhaGPTChat:
"""
A class to manage and interact with OpenAI ChatModels to generate and improve Piranha rule graphs.
"""
explanation = '''
Your task is to improve refactoring rules for Polyglot Piranha, a tool that uses tree-sitter for parsing and refactoring code.
The rules are expressed in a domain-specific language (DSL) specific to Polyglot Piranha. Examples and explanations of the DSL will be provided below.
You will be provided with the original and refactored code snippets, along with statically inferred rules verified by an algorithm.
However, these rules may appear unnatural since they were automatically generated. Your goal is to make them resemble rules written by humans
Key requirements:
- The semantics of the rules should remain unchanged unless a specific request to alter them is made.
- Strive to simplify the rules as much as possible. Always simplify lengthy s-expressions.
- Explain each rule individually. Explain the rule in a way that a human can understand it.
Please structure your response using toml and markdown format. Refer to the expected output format below, as it will be parsed automatically.
========================= Piranha Rule Graph =========================
Piranha is language to express cascading program transformation.
Each node in graph represents a transformation rule that identifies and modify specific code snippets.
The edges between rules specify when, and where each rule should be applied.
========================= Piranha Rule Explanation =========================
Rules are represented in TOML. Each rule should contain at least one rule with the following properties:
- "query": Tree-sitter query to find the code pattern to refactor
- "replace_node": The captured node in the query that will be replaced
- "replace_string": Replacement string or pattern for the refactored code
- "holes": Placeholders in your queries that will be instantiated at runtime
- "is_seed_rule": Specifies whether this rule is an entry point for the rule graph.
Additionally, the rule can have optional filters. Filters can have properties like "enclosing_node",
"not_contains", "contains", "at_least", "at_most". The filters are used to specify conditions for the rule to be applied.
========================= Rule Format =========================
```toml
# Define your rule within this section
[[rules]]
# Provide a unique name for your rule
name = "your_rule_name"
# Write a Tree-sitter query to identify the code pattern for refactoring. The outer most node should always be captured.
# The tree-sitter query depends on the language. The nodes you see here are for Java. You need to only use nodes in the TASK!
query = """(
(method_invocation name: (_) @name
arguments: (argument_list) @args) @invk
(#eq? @name @hole1))
"""
# Specify the captured node from the query that will be replaced
# Never add @ before the node name! Otherwise it will NOT compile!
replace_node = "invk"
# Replacement string that will substitute `replace_node`
replace = "X.other_string @args"
# Specify any placeholders in your queries that will be filled in at runtime
# In our case hole1 is used in the query, but not defined. Therefore it is a hole.
holes = ["hole1"]
# Specify if this rule should be triggered first. If it depends on other rules, set to false
is_seed_rule = true
# If necessary, define filters for your rule
[[rules.filters]]
# This pattern should match any ancestor of the captured node (optional)
enclosing_node = "(your_enclosing_node_pattern) @your_capture_name"
# Define patterns that should not be present within the enclosing_node (optional)
# Always use a list, even if you only have one pattern.
not_contains = [
"""(
(identifier) @id
(#eq? @id "x"))
""",
]
# Define a pattern that should be present within the enclosing_node (optional)
contains =
"""(
(identifier) @other_id
(#eq? @other_id "y"))
"""
# Define the minimum and maximum number of children that should match the 'contains' pattern (optional)
at_least = 1
at_most = 5
```
========================= Edge Explanation =========================
Edges allow rules to depend on each other, thus establishing a hierarchy or sequence of application among rules.
For instance, if a rule is defined to match a method invocation, another rule could be drafted to match a method declaration.
In this case, the method name identified from the declaration could be utilized within the invocation.
An edge essentially describes the direction of dependency between two or more rules. It signifies that a particular rule
('from') is based on, or derives information from, one or more other rules ('to').
Edges are also represented in the TOML format, and their structure is typically not modified unless there's a need to
change the dependencies between rules. Your main task, unless otherwise specified, is to ensure that the 'from' and 'to'
components of the edge correctly correspond to the names of your improved rules.
========================= Edge Format =========================
[[edges]]
# Scope of the rule - usually "Global"
scope = "Global"
# Name of the rule that depends on other rules (your rule name)
from = "your_rule_name"
# List of rules that your rule depends on (could be one or multiple)
to = ["other_rule_name", "another_rule_name"]
========================= Expected output format =========================
Your output should be a single TOML file containing the improved rules and edges, as well as an explanation in Markdown format.
Rule Graph
```toml
[[rules]] # For each rule
...
[[edges]] # For each edge
...
```
Explanation
```md
#### `<your_rule_name1>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
#### `<your_rule_name2>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
```
========================= Rule Examples =========================
'''
input_template = """
========================= Task =========================
=== Source code ===
{source_code}
=== Tree-sitter representation (source code) ===
{source_tree}
=== Tree-sitter representation (target code) ===
{target_tree}
=== Diff ===
{diff}
=== Rules and edges to improve ===
{rules}
=== Additional requirements ===
{hints}
========================= Please simplify the rules and edges =========================
Remember, the goal is to simplify the rules and edges as much as possible while still achieving the same result.
You should only use nodes you see in the tree-sitter representation of the source code!!
"""
add_filter_prompt = '''
Can you to further refine the following rule? Here is the request:
{desc}
========================= Current rule =========================
{rule}
========================= Task =========================
Improve the rule by incorporating a filter. You are permitted to add only two types of filters: enclosing_node and contains.
You should also include an explanation for the new rule.
You're allowed to add any number of filters and further restrict the nodes using #eq, #not-eq, and #match.
Key requirements:
- Structure your response using TOML and Markdown formatting for automatic parsing.
- You can ONLY chose filters from the list below. You may refine them but they should not deviate from the list.
- Be sure to use unique names for your capture groups to avoid overlapping with existing ones from the query!
- Make sure all the nodes are named. Every captured node should have a unique name, including the outermost node.
- Always surround enclosing_node and contains with parenthesis (...), including the #eq, #not-eq, and #match operators.
========================= Expected output format =========================
Rules
```toml
[[rules]]
....
[[rules.filters]] # filter 1
enclosing_node = """(class_declaration) @class"""
contains = """(
(identifier) @id (#eq? @id "x")
)"""
at_least = 1
at_most = 1
[[rules.filters]] # filter 2
enclosing_node = """(method_invocation) @invk"""
[[rules.filters]] # filter 3
enclosing_node = """(class_declaration) @class"""
contains = """(
(method_declaration
(modifiers) @modifiers
name: (identifier) @name) @decl
(#eq? @name "x")
)
```
Explanation
```md
#### `<your_rule_name1>`\n
- <Your detailed explanation>
- <Include multiple bullet points if necessary>
```
========================= List of Filters =========================
=== Potential filters for enclosing node ===
{enclosing_node_filters}
"""]
========================= Errors to avoid =========================
Not surrounding the query with parenthesis (...):
enclosing_node = "(identifier) @name) (#eq? @name \"x\")"
is wrong!! it should be:
enclosing_node = """((identifier) @name) (#eq? @name "x"))"""
```
'''
holes = attr.ib(type=dict)
messages = attr.ib(type=list, default=attr.Factory(list))
temperature = attr.ib(
type=float,
default=0.3,
validator=[
attr.validators.ge(0),
attr.validators.le(1),
],
)
model = attr.ib(
default="gpt-4-32k",
validator=attr.validators.in_(["gpt-4", "gpt-4-32k", "gpt-3.5-turbo-16k"]),
)
def __attrs_post_init__(self):
"""
Automatically called after the initialization of the instance. It gathers example rules and edge
files from a specified path, formats the content and adds it to the internal message list.
:param None
:return None
"""
examples = self._get_examples("../../src/cleanup_rules/java")
formatted = (
PiranhaGPTChat.explanation
+ "\n"
+ examples
+ "\n"
+ PiranhaGPTChat.input_template.format(**self.holes)
)
self.messages.append({"role": "user", "content": formatted})
def append_system_message(self, system_message: str):
"""
Appends a message from the GPT model to the internal message list.
:param system_message: str: The message content to be added to the message list.
:return None
"""
self.messages.append({"role": "assistant", "content": system_message})
def append_user_followup(self, followup_message: str):
"""
Appends a follow-up message from the user to the internal message list.
:param followup_message: str: The message content to be added to the message list.
:return None
"""
self.messages.append({"role": "user", "content": followup_message})
def get_model_response(self) -> str:
"""
Fetches the latest message from the GPT model. If the latest message is from the user, it will trigger
a new GPT model prediction and append the response to the internal message list.
:param None
:return str: The latest message content from the GPT model.
"""
latest_message = self.messages[-1]
if latest_message["role"] == "assistant":
return latest_message["content"]
else:
completions = self.get_completion(n_samples=1)
content = completions[0]
self.append_system_message(content)
return content
def append_improve_request(self, desc, rule, enclosing_nodes):
"""
Appends a request to improve the rule to the internal message list.
:param desc: str: Description of the request.
:param rule: str: The rule to be improved.
:param enclosing_nodes: str: The enclosing nodes to be included in the rule.
:return None
"""
self.messages.append(
{
"role": "user",
"content": PiranhaGPTChat.add_filter_prompt.format(
desc=desc,
rule=rule,
enclosing_node_filters=enclosing_nodes,
),
}
)
def get_completion(self, n_samples: int = 1) -> Optional[List[str]]:
"""
Attempts to generate a new GPT model prediction based on the internal message list. It handles
common OpenAI API exceptions such as rate limiting and API errors.
:param n_samples: int: Number of samples to generate from the model.
:return List[str]: A list of generated messages. None if an API exception occurs.
:raises PiranhaChatException: If it fails to generate a completion from the GPT model after three attempts.
"""
for _ in range(3):
try:
logger.debug("Attempting to get completion from GPT.")
response = openai.ChatCompletion.create(
model=self.model,
messages=self.messages,
temperature=self.temperature, # this is the degree of randomness of the model's output
n=n_samples,
)
return [
response.choices[i].message.content
for i in range(len(response.choices))
]
except (
openai.error.RateLimitError,
openai.error.Timeout,
openai.error.APIError,
) as e:
logger.error(e)
sleep_time = 0.5
logger.error(f"Rate limit reached. Sleeping for {sleep_time}s.")
time.sleep(sleep_time)
raise PiranhaChatException("Failed to get completion from GPT.")
@staticmethod
def _get_examples(path_to_examples_rules):
"""
Walks through a specified directory to gather and format the content of example rule and edge files.
The formatted content is then returned as a single string.
:param path_to_examples_rules: str: Path to the directory containing example rule and edge files.
:return str: Formatted content of example rule and edge files.
"""
task_examples = ""
for root, dirs, files in os.walk(path_to_examples_rules):
for file in files:
if file.endswith("rules.toml") or file.endswith("edges.toml"):
file_name = os.path.join(root, file)
file_contents = Path(file_name).read_text()
file_contents = "\n".join(
[
line
for line in file_contents.split("\n")
if not line.startswith("#")
]
)
task_examples += f"<file_name_start> {file_name} <file_name_end>\n"
task_examples += f"```toml {file_contents}```\n"
return task_examples
| [
"\nCan you to further refine the following rule? Here is the request:\n \n{desc}\n \n========================= Current rule =========================\n \n{rule}\n\n========================= Task =========================\n\nImprove the rule by incorporating a filter. You are permitted to add only two types of filters: enclosing_node and contains. \nYou should also include an explanation for the new rule.\n\nYou're allowed to add any number of filters and further restrict the nodes using #eq, #not-eq, and #match. \n\n\nKey requirements:\n - Structure your response using TOML and Markdown formatting for automatic parsing.\n - You can ONLY chose filters from the list below. You may refine them but they should not deviate from the list.\n - Be sure to use unique names for your capture groups to avoid overlapping with existing ones from the query!\n - Make sure all the nodes are named. Every captured node should have a unique name, including the outermost node.\n - Always surround enclosing_node and contains with parenthesis (...), including the #eq, #not-eq, and #match operators.\n\n\n========================= Expected output format =========================\n\nRules\n\n```toml\n\n[[rules]]\n....\n\n[[rules.filters]] # filter 1\nenclosing_node = \"\"\"(class_declaration) @class\"\"\"\ncontains = \"\"\"(\n (identifier) @id (#eq? @id \"x\")\n)\"\"\"\nat_least = 1\nat_most = 1\n\n[[rules.filters]] # filter 2\nenclosing_node = \"\"\"(method_invocation) @invk\"\"\"\n\n[[rules.filters]] # filter 3\nenclosing_node = \"\"\"(class_declaration) @class\"\"\"\ncontains = \"\"\"(\n(method_declaration\n (modifiers) @modifiers\n name: (identifier) @name) @decl\n (#eq? @name \"x\")\n)\n```\n\nExplanation\n\n```md\n#### `<your_rule_name1>`\n\n- <Your detailed explanation>\n- <Include multiple bullet points if necessary>\n```\n\n========================= List of Filters =========================\n\n=== Potential filters for enclosing node ===\n\n{enclosing_node_filters}\n\n\n\"\"\"]\n\n========================= Errors to avoid =========================\n\nNot surrounding the query with parenthesis (...):\nenclosing_node = \"(identifier) @name) (#eq? @name \"x\")\"\n\nis wrong!! it should be:\nenclosing_node = \"\"\"((identifier) @name) (#eq? @name \"x\"))\"\"\"\n\n```\n",
"\n========================= Task =========================\n\n=== Source code === \n\n{source_code}\n\n=== Tree-sitter representation (source code) ===\n\n{source_tree}\n\n=== Tree-sitter representation (target code) ===\n\n{target_tree}\n\n=== Diff === \n\n{diff}\n\n=== Rules and edges to improve === \n\n{rules}\n\n=== Additional requirements === \n\n{hints}\n========================= Please simplify the rules and edges =========================\n\nRemember, the goal is to simplify the rules and edges as much as possible while still achieving the same result.\nYou should only use nodes you see in the tree-sitter representation of the source code!!\n\n "
] |
2024-01-10 | daekeun-ml/genai-ko-LLM | utils~inference_lib.py | import boto3
import time
import json
import os.path as osp
from typing import Union
import pprint
def parse_response(query_response):
def traverse(o, tree_types=(list, tuple)):
if isinstance(o, tree_types):
for value in o:
for subvalue in traverse(value, tree_types):
yield subvalue
else:
yield o
data = eval(query_response)
listRes = []
for value in traverse(data):
listRes.append(value["generated_text"])
if len(listRes) >= 2: return listRes
else: return listRes[0].strip()
# def invoke_inference(endpoint_name, prompt):
# '''
# Invoke the endpoint with a KoAlpaca prompt
# '''
# client = boto3.client("sagemaker-runtime")
# content_type = "text/plain"
# response = client.invoke_endpoint(
# EndpointName=endpoint_name, ContentType=content_type, Body=prompt
# )
# #print(response["Body"].read())
# res = response["Body"].read().decode()
# print (eval(res)[0]['generated_text'])
# def invoke_inference_DJ(endpoint_name, prompt):
# client = boto3.client("sagemaker-runtime")
# content_type = "application/json"
# response = client.invoke_endpoint(
# EndpointName=endpoint_name,
# ContentType=content_type,
# Body=json.dumps(prompt)
# )
# res = response["Body"].read().decode()
# return res
# def query_endpoint_with_text_payload(plain_text, endpoint_name, content_type="text/plain"):
# '''
# content_type 이 text/plain 인 경우 사용
# '''
# client = boto3.client("runtime.sagemaker")
# response = client.invoke_endpoint(
# EndpointName=endpoint_name, ContentType=content_type, Body=plain_text
# )
# return response
# def parse_response_text_model(query_response):
# '''
# content_type 이 text/plain 인 경우 사용
# '''
# model_predictions = json.loads(query_response["Body"].read())
# # print("model_predictions: \n", model_predictions)
# generated_text = model_predictions[0]["generated_text"]
# return generated_text
"""
A dedicated helper to manage templates and prompt building.
"""
class Prompter(object):
__slots__ = ("template", "_verbose")
def __init__(self, template_name: str = "", verbose: bool = False):
self._verbose = verbose
if not template_name:
# Enforce the default here, so the constructor can be called with '' and will not break.
template_name = "alpaca"
file_name = osp.join("../templates", f"{template_name}.json")
if not osp.exists(file_name):
raise ValueError(f"Can't read {file_name}")
with open(file_name) as fp:
self.template = json.load(fp)
if self._verbose:
print(
f"Using prompt template {template_name}: {self.template['description']}"
)
def generate_prompt(
self,
instruction: str,
input: Union[None, str] = None,
label: Union[None, str] = None,
) -> str:
# returns the full prompt from instruction and optional input
# if a label (=response, =output) is provided, it's also appended.
if input:
res = self.template["prompt_input"].format(
instruction=instruction, input=input
)
else:
res = self.template["prompt_no_input"].format(
instruction=instruction
)
if label:
res = f"{res}{label}"
if self._verbose:
print(res)
return res
def get_response(self, output: str) -> str:
return output.split(self.template["response_split"])[1].strip()
def describe_endpoint(endpoint_name):
'''
    Check whether the endpoint exists; if it is still being created, wait for it.
'''
sm_client = boto3.client("sagemaker")
while(True):
response = sm_client.describe_endpoint(
EndpointName= endpoint_name
)
status = response['EndpointStatus']
if status == 'Creating':
print("Endpoint is ", status)
time.sleep(60)
else:
print("Endpoint is ", status)
break
class KoLLMSageMakerEndpoint(object):
def __init__(self, endpoint_name):
self.endpoint_name = endpoint_name
self.prompter = Prompter("kullm")
self.smr_client = boto3.client('sagemaker-runtime')
def get_payload(self, instruction, input_text, params):
prompt = self.prompter.generate_prompt(instruction, input_text)
payload = {
'inputs': prompt,
'parameters': params
}
payload_str = json.dumps(payload)
return payload_str.encode("utf-8")
def infer(self, payload, content_type="application/json", verbose=True):
response = self.smr_client.invoke_endpoint(
EndpointName=self.endpoint_name,
ContentType=content_type,
Body=payload
)
res = json.loads(response['Body'].read().decode("utf-8"))
generated_text = res[0]["generated_text"]
#generated_text = self.prompter.get_response(generated_text)
generated_text = generated_text.split('###')[0]
if verbose:
pprint.pprint(f'Response: {generated_text}')
return generated_text
################################################
# Embedding Handler
################################################
# from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
# from langchain.embeddings import SagemakerEndpointEmbeddings
# from langchain.llms.sagemaker_endpoint import ContentHandlerBase
# from typing import Any, Dict, List, Optional
# class SagemakerEndpointEmbeddingsJumpStart(SagemakerEndpointEmbeddings):
# def embed_documents(self, texts: List[str], chunk_size: int = 5) -> List[List[float]]:
# """Compute doc embeddings using a SageMaker Inference Endpoint.
# Args:
# texts: The list of texts to embed.
# chunk_size: The chunk size defines how many input texts will
# be grouped together as request. If None, will use the
# chunk size specified by the class.
# Returns:
# List of embeddings, one for each text.
# """
# results = []
# _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
# # print("text size: ", len(texts))
# # print("_chunk_size: ", _chunk_size)
# for i in range(0, len(texts), _chunk_size):
# response = self._embedding_func(texts[i : i + _chunk_size])
# print
# results.extend(response)
# return results
# import numpy as np
# class KoSimCSERobertaContentHandler(EmbeddingsContentHandler):
# content_type = "application/json"
# accepts = "application/json"
# def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
# input_str = json.dumps({"inputs": prompt, **model_kwargs})
# return input_str.encode("utf-8")
# def transform_output(self, output: bytes) -> str:
# response_json = json.loads(output.read().decode("utf-8"))
# ndim = np.array(response_json).ndim
# # print("response_json ndim: \n", ndim)
# # print("response_json shape: \n", np.array(response_json).shape)
# if ndim == 4:
# # Original shape (1, 1, n, 768)
# emb = response_json[0][0][0]
# emb = np.expand_dims(emb, axis=0).tolist()
# # print("emb shape: ", np.array(emb).shape)
# # print("emb TYPE: ", type(emb))
# elif ndim == 2:
# # Original shape (n, 1)
# # print(response_json[0])
# emb = []
# for ele in response_json:
# # print(np.array(response_json[0]).shape)
# e = ele[0][0]
# #emb = np.expand_dims(emb, axis=0).tolist()
# # print("emb shape: ", np.array(emb).shape)
# # print("emb TYPE: ", type(emb))
# emb.append(e)
# # print("emb_list shape: ", np.array(emb).shape)
# # print("emb_list TYPE: ", type(emb))
# else:
# print(f"Other # of dimension: {ndim}")
# emb = None
# return emb
# ################################################
# # LLM Handler
# ################################################
# from langchain.llms.sagemaker_endpoint import LLMContentHandler
# import json
# class KoAlpacaContentHandler(LLMContentHandler):
# content_type = "application/json"
# accepts = "application/json"
# def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
# input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
# return input_str.encode("utf-8")
# def transform_output(self, output: bytes) -> str:
# print("In KoAlpacaContentHandler")
# # print("output: ", output)
# response_json = json.loads(output.read().decode("utf-8"))
# print("response_json: ", response_json)
# # return response_json["generated_texts"][0]
# doc = response_json[0]['generated_text']
# doc = json.loads(doc)
# doc = doc['text_inputs']
# return doc | [
"alpaca"
] |
2024-01-10 | daekeun-ml/genai-ko-LLM | utils~common_lib.py | def check_packages():
try:
import langchain
_has_packages = True
except (ImportError, AttributeError):
_has_packages = False
if _has_packages:
print("Proceed.")
else:
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("[ERROR] 0번 모듈 노트북(0_setup.ipynb)을 먼저 실행해 주세요.")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++") | [] |
2024-01-10 | liudingxiao/AgentGPT | platform~reworkd_platform~web~api~agent~task_output_parser.py | import ast
import re
from typing import List
from langchain.schema import BaseOutputParser, OutputParserException
class TaskOutputParser(BaseOutputParser[List[str]]):
"""
Extension of LangChain's BaseOutputParser
Responsible for parsing task creation output into a list of task strings
"""
completed_tasks: List[str] = []
def __init__(self, *, completed_tasks: List[str]):
super().__init__()
self.completed_tasks = completed_tasks
def parse(self, text: str) -> List[str]:
try:
array_str = extract_array(text)
all_tasks = [
remove_prefix(task) for task in array_str if real_tasks_filter(task)
]
return [task for task in all_tasks if task not in self.completed_tasks]
except Exception as e:
msg = f"Failed to parse tasks from completion {text}. Got: {e}"
raise OutputParserException(msg)
def get_format_instructions(self) -> str:
return """
The response should be a JSON array of strings. Example:
["Search the web for NBA news", "Write some code to build a web scraper"]
This should be parsable by json.loads()
"""
def extract_array(input_str: str) -> List[str]:
regex = (
r"\[\s*\]|" # Empty array check`
r"(\[(?:\s*(?:\"(?:[^\"\\]|\\.)*\"|\'(?:[^\'\\]|\\.)*\')\s*,?)*\s*\])"
)
match = re.search(regex, input_str)
if match is not None:
return ast.literal_eval(match[0])
else:
raise RuntimeError(f"Failed to extract array from {input_str}")
def remove_prefix(input_str: str) -> str:
prefix_pattern = (
r"^(Task\s*\d*\.\s*|Task\s*\d*[-:]?\s*|Step\s*\d*["
r"-:]?\s*|Step\s*[-:]?\s*|\d+\.\s*|\d+\s*[-:]?\s*|^\.\s*|^\.*)"
)
return re.sub(prefix_pattern, "", input_str, flags=re.IGNORECASE)
def real_tasks_filter(input_str: str) -> bool:
no_task_regex = (
r"^No( (new|further|additional|extra|other))? tasks? (is )?("
r"required|needed|added|created|inputted).*"
)
task_complete_regex = r"^Task (complete|completed|finished|done|over|success).*"
do_nothing_regex = r"^(\s*|Do nothing(\s.*)?)$"
return (
not re.search(no_task_regex, input_str, re.IGNORECASE)
and not re.search(task_complete_regex, input_str, re.IGNORECASE)
and not re.search(do_nothing_regex, input_str, re.IGNORECASE)
)
| [] |
2024-01-10 | liudingxiao/AgentGPT | platform~reworkd_platform~tests~agent~agent~task_output_parser_test.py | from typing import List, Type
import pytest
from langchain.schema import OutputParserException
from reworkd_platform.web.api.agent.task_output_parser import (
real_tasks_filter,
remove_prefix,
extract_array,
TaskOutputParser,
)
@pytest.mark.parametrize(
"input_text,expected_output",
[
(
'["Task 1: Do something", "Task 2: Do something else", "Task 3: Do '
'another thing"]',
["Do something", "Do something else", "Do another thing"],
),
(
'Some random stuff ["1: Hello"]',
["Hello"],
),
(
"[]",
[],
),
],
)
def test_parse_success(input_text: str, expected_output: List[str]) -> None:
parser = TaskOutputParser(completed_tasks=[])
result = parser.parse(input_text)
assert result == expected_output
def test_parse_with_completed_tasks() -> None:
input_text = '["One", "Two", "Three"]'
completed = ["One"]
expected = ["Two", "Three"]
parser = TaskOutputParser(completed_tasks=completed)
result = parser.parse(input_text)
assert result == expected
@pytest.mark.parametrize(
"input_text,exception",
[
("This is not an array", OutputParserException),
],
)
def test_parse_failure(input_text: str, exception: Type[Exception]) -> None:
parser = TaskOutputParser(completed_tasks=[])
with pytest.raises(exception):
parser.parse(input_text)
@pytest.mark.parametrize(
"input_str, expected",
[
("[]", []),
('["One"]', ["One"]),
(
'```json\n["Research", "Develop", "Integrate"]\n```',
["Research", "Develop", "Integrate"],
),
('["Search", "Identify"]', ["Search", "Identify"]),
("['Single quote']", ["Single quote"]),
("['Single with \"quote\"']", ['Single with "quote"']),
("Random stuff ['Search', 'Identify']", ["Search", "Identify"]),
('["Item 1","Item 2","Item 3"]', ["Item 1", "Item 2", "Item 3"]),
('{"array": ["123", "456"]}', ["123", "456"]),
],
)
def test_extract_array_success(input_str: str, expected: List[str]) -> None:
print(input_str, expected)
print(extract_array(input_str), expected)
assert extract_array(input_str) == expected
@pytest.mark.parametrize(
"input_str, exception",
[
(None, TypeError),
("123", RuntimeError),
("Some random text", RuntimeError),
('"single_string"', RuntimeError),
('{"test": 123}', RuntimeError),
('["Unclosed array", "other"', RuntimeError),
],
)
def test_extract_array_exception(input_str: str, exception: Type[Exception]) -> None:
with pytest.raises(exception):
extract_array(input_str)
@pytest.mark.parametrize(
"task_input, expected_output",
[
("Task: This is a sample task", "This is a sample task"),
(
"Task 1: Perform a comprehensive analysis of system performance.",
"Perform a comprehensive analysis of system performance.",
),
("Task 2. Create a python script", "Create a python script"),
("5 - This is a sample task", "This is a sample task"),
("2: This is a sample task", "This is a sample task"),
(
"This is a sample task without a prefix",
"This is a sample task without a prefix",
),
("Step: This is a sample task", "This is a sample task"),
(
"Step 1: Perform a comprehensive analysis of system performance.",
"Perform a comprehensive analysis of system performance.",
),
("Step 2:Create a python script", "Create a python script"),
("Step:This is a sample task", "This is a sample task"),
(
". Conduct research on the history of Nike",
"Conduct research on the history of Nike",
),
(".This is a sample task", "This is a sample task"),
(
"1. Research the history and background of Nike company.",
"Research the history and background of Nike company.",
),
],
)
def test_remove_task_prefix(task_input: str, expected_output: str) -> None:
output = remove_prefix(task_input)
assert output == expected_output
@pytest.mark.parametrize(
"input_text, expected_result",
[
("Write the report", True),
("No new task needed", False),
("Task completed", False),
("Do nothing", False),
("", False), # empty_string
("no new task needed", False), # case_insensitive
],
)
def test_real_tasks_filter_no_task(input_text: str, expected_result: bool) -> None:
assert real_tasks_filter(input_text) == expected_result
| [] |
2024-01-10 | liudingxiao/AgentGPT | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from langchain.chains import LLMChain
from langchain.output_parsers import PydanticOutputParser
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis, get_default_analysis
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_settings import ModelSettings, create_model
from reworkd_platform.web.api.agent.prompts import (
start_goal_prompt,
analyze_task_prompt,
create_tasks_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.tools import (
get_tools_overview,
get_tool_from_name,
get_user_tools,
)
class OpenAIAgentService(AgentService):
def __init__(self, model_settings: ModelSettings):
self.model_settings = model_settings
self._language = model_settings.language or "English"
async def start_goal_agent(self, *, goal: str) -> List[str]:
completion = await call_model_with_handling(
self.model_settings,
start_goal_prompt,
{"goal": goal, "language": self._language},
)
task_output_parser = TaskOutputParser(completed_tasks=[])
return parse_with_handling(task_output_parser, completion)
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
llm = create_model(self.model_settings)
chain = LLMChain(llm=llm, prompt=analyze_task_prompt)
pydantic_parser = PydanticOutputParser(pydantic_object=Analysis)
print(get_tools_overview(get_user_tools(tool_names)))
completion = await chain.arun(
{
"goal": goal,
"task": task,
"tools_overview": get_tools_overview(get_user_tools(tool_names)),
}
)
print("Analysis completion:\n", completion)
try:
return pydantic_parser.parse(completion)
except Exception as error:
print(f"Error parsing analysis: {error}")
return get_default_analysis()
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> str:
print("Execution analysis:", analysis)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model_settings).call(goal, task, analysis.arg)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
llm = create_model(self.model_settings)
chain = LLMChain(llm=llm, prompt=create_tasks_prompt)
completion = await chain.arun(
{
"goal": goal,
"language": self._language,
"tasks": tasks,
"lastTask": last_task,
"result": result,
}
)
task_output_parser = TaskOutputParser(completed_tasks=completed_tasks or [])
return task_output_parser.parse(completion)
| [] |
2024-01-10 | liudingxiao/AgentGPT | platform~reworkd_platform~web~api~agent~model_settings.py | from random import randint
from typing import Optional
import openai
from langchain.chat_models import ChatOpenAI
from pydantic import BaseModel
from reworkd_platform.settings import settings
class ModelSettings(BaseModel):
customModelName: Optional[str] = None
customTemperature: Optional[float] = None
customMaxLoops: Optional[int] = None
maxTokens: Optional[int] = None
language: Optional[str] = "English"
def get_server_side_key() -> str:
keys = [
key.strip() for key in (settings.openai_api_key or "").split(",") if key.strip()
]
return keys[randint(0, len(keys) - 1)] if keys else ""
GPT_35_TURBO = "gpt-3.5-turbo"
openai.api_base = settings.openai_api_base
def create_model(model_settings: Optional[ModelSettings]) -> ChatOpenAI:
return ChatOpenAI(
openai_api_key=get_server_side_key(),
temperature=model_settings.customTemperature
if model_settings and model_settings.customTemperature is not None
else 0.9,
model_name=model_settings.customModelName
if model_settings and model_settings.customModelName is not None
else GPT_35_TURBO,
max_tokens=model_settings.maxTokens
if model_settings and model_settings.maxTokens is not None
else 400,
)
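# Minimal usage sketch: create_model() falls back to gpt-3.5-turbo,
# temperature 0.9 and 400 max tokens for any field left unset.
#
#   llm = create_model(ModelSettings(customTemperature=0.2, maxTokens=300))
#   llm_defaults = create_model(None)  # all defaults; API key comes from settings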
| [] |
2024-01-10 | andrecharneca/News-Info-Extraction | code~src~pipelines~utils~gpt_utils.py | import pandas as pd
## GPT3 ##
def user_input_from_article(article) -> str:
"""
Gets the user input from the article
args:
article: article dict
returns:
user_input: user input
"""
user_input = f"""Title: {article['title']}\nDate: {article['date']}\nText: {article['text']}"""
return user_input
def make_prompt(prompt: str, user_input: str) -> str:
"""
Replaces the [user_input] in the prompt with the user input
args:
prompt: prompt to be used
user_input: user input to be used
returns:
prompt: prompt with user input
"""
return prompt.replace("[user_input]", user_input)
def get_rows_from_completion(completion: str, ending: str = "end") -> list[list[str]]:
"""
Turns the text from the openai gpt response into a list of table rows
args:
completion: completion from openai
        ending: ending of the table (use 'end' if the table ends with '|end|')
returns:
list: list of table rows
"""
# copy string
text = completion
# add '|' to the end of string, if it doesn't end with '|'
if text[-1] != '|':
text += '|'
rows = text.split("\n")
rows = [row.split("|") for row in rows]
#remove trailing spaces from each row
rows = [[item.strip() for item in row] for row in rows]
rows = [row[1:-1] for row in rows if len(row) > 1]
# remove ending row
rows = [row for row in rows if row[0] != ending]
return rows
def get_table_from_completion(completion, cols : list[str] = ['entity_1', 'relationship', 'entity_2', 'relationship_date', 'passage']) -> pd.DataFrame:
"""
Turns the text from the openai response into a pandas dataframe
args:
response: response from openai
cols: column names for the table
returns:
table: pandas dataframe
"""
rows = get_rows_from_completion(completion)
# if a row doesn't have the same number of columns as cols, then remove it
rows = [row for row in rows if len(row) == len(cols)]
table = pd.DataFrame(rows, columns=cols)
return table
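# Worked example (hypothetical completion text) of how the helpers above parse
# a |entity_1|relationship|entity_2|date|passage| style table:
#
#   completion = "|Apple|acquired|Beats|2014|Apple acquired Beats in 2014.|\n|end|"
#   get_rows_from_completion(completion)
#   # -> [['Apple', 'acquired', 'Beats', '2014', 'Apple acquired Beats in 2014.']]
#   get_table_from_completion(completion)
#   # -> one-row DataFrame with the five default columns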
## ChatGPT ##
def append_user_input_to_chat(chatbot_prompt, user_input):
"""
Appends the user input to the chatbot prompt dict
args:
chatbot_prompt: chatbot prompt dict
user_input: user input
returns:
chatbot_prompt_cpy: chatbot prompt dict with user input appended
"""
# create copy of chatbot prompt
chatbot_prompt_cpy = chatbot_prompt.copy()
prompt_dict = {'role': 'user', 'content': user_input}
chatbot_prompt_cpy.append(prompt_dict)
return chatbot_prompt_cpy | [
"{'role': 'user', 'content': PLACEHOLDER}"
] |
2024-01-10 | andrecharneca/News-Info-Extraction | code~src~pipelines~gpt3_pipeline.py | import openai
import os
from typing import Union
from .utils.prompts import ENTITY_REL_ENTITY_PROMPT
from .utils.relationships import RELATIONSHIP_LIST
from .utils.utils import GPT3Utils
from .datamodules.datasets import ParagraphDataset
import json
from pipelines.paths import TEMP_DATA_DIR
class CompanyGPT3Pipeline():
"""
Paragraph (dict, list[dict], ParagraphDataset) -> Prompt -> Post processing/filtering -> {'relationships': list[dict]}
- relationships (dict {"entity_1": str, "entity_2": str, "relationship": str, "passage": str})
Expects ParagraphDataset with keys: "text", "title", "url"
The prompt is formatted for the completion to be |Company|Relationship|Company|Date|Passage|, and end in |end|.
"""
def __init__(self,
prompt : str = ENTITY_REL_ENTITY_PROMPT,
debug : bool = False,
debug_article_ids : list[int] = None,
**gpt_kwargs):
self.prompt = prompt
self.gpt_kwargs = gpt_kwargs
self.debug = debug
self.input = {"article_id":-1, "paragrah_id":-1} # used for postprocessing
openai.api_key = os.environ["OPENAI_API_KEY"]
def _get_response(self, article_id, paragraph_id):
response_path = os.path.join(TEMP_DATA_DIR, f"gpt3_responses/article_id_{article_id}_paragraph_id_{paragraph_id}.json")
with open(response_path, "r") as f:
response = json.load(f)
return response
def preprocess(self, input):
self.input = input
model_input = self.prompt.format(text=input["text"], title=input["title"], date="")
return model_input
def forward(self, model_input):
if self.debug:
model_output = self._get_response(self.input["article_id"], self.input["paragraph_id"])
else:
model_output = openai.Completion.create(prompt = model_input, **self.gpt_kwargs)
# save response to json
file_name = os.path.join(TEMP_DATA_DIR, f"gpt3_responses/article_id_{self.input['article_id']}_paragraph_id_{self.input['paragraph_id']}.json")
json.dump(model_output, open(file_name, "w"))
return model_output
def postprocess(self, model_output):
completion = model_output["choices"][0]["text"]
relationships = GPT3Utils.parse_table(completion)
entities = GPT3Utils.parse_entity_list(completion)
relationships = GPT3Utils.remove_hallucinated_relationships(relationships, RELATIONSHIP_LIST)
input_text = f"{self.input['title']} {self.input['text']}"
relationships = GPT3Utils.filter_relationships_by_text(relationships, input_text)
entities = GPT3Utils.filter_entities_by_text(entities, input_text)
return {"relationships": relationships, "entities": entities}
def __call__(self, paragraphs : Union[dict, list[dict], ParagraphDataset], *args, **kwargs):
"""
Runs the pipeline sequentially on the input paragraphs.
Works paragraph by paragraph.
"""
if isinstance(paragraphs, ParagraphDataset):
return self(paragraphs.data, *args, **kwargs)
elif isinstance(paragraphs, list):
return [self(paragraph, *args, **kwargs) for paragraph in paragraphs]
elif isinstance(paragraphs, dict):
prep_paragraph = self.preprocess(paragraphs)
model_output = self.forward(prep_paragraph)
post_output = self.postprocess(model_output)
return post_output | [] |
2024-01-10 | jlevy/autotube | backend~src~prompts.py | from anthropic import HUMAN_PROMPT, AI_PROMPT
def make_prompt(transcript) -> str:
return f"""
{HUMAN_PROMPT}
The following is a transcript of a podcast.
<transcript>
{transcript}
</transcript>
Read the transcript and answer the following questions:
1. What were the main high-level topics discussed in the podcast?
2. For each logical topic discussed in the podcast, what are the key takeaways? Summarize as a short sentence or phrase.
After thinking about it, format your answers as XML, like so:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<video>
<topics>
    <topic>
      <content>A short word or phrase describing the topic</content>
      <instances>
        <substring_index></substring_index>
      </instances>
    </topic>
    <topic>
      <content>Another short word or phrase, describing a different topic discussed</content>
      <instances>
        <substring_index>A substring from the transcript to reference a section where the topic was discussed</substring_index>
        <substring_index>Another substring from the transcript to reference a different section where the same topic was discussed</substring_index>
      </instances>
    </topic>
</topics>
<takeaways>
<takeaway>
<content>A short sentence summarizing the content discussed in this segment</content>
<substring_index>A substring from the transcript to reference the section being summarized in this takeaway</substring_index>
</takeaway>
<takeaway>
<content>Another short sentence summarizing the content discussed in this segment</content>
<substring_index>A substring from the transcript to reference the section being summarized in this takeaway</substring_index>
</takeaway>
</takeaways>
</video>
```
**IMPORTANT:** Please return _only_ the correctly formatted XML, without any additional information or text.
{AI_PROMPT}""" | [] |
2024-01-10 | ionvop/ai-desktop-assistant | 20231204~assets~python~stt.py | import openai
import os
import sys
def main():
openai.api_key = os.getenv("OPENAI_API_KEY")
audio_file = open(sys.argv[1], "rb")
response = openai.audio.transcriptions.create(
model="whisper-1",
file=audio_file
)
print(response.text)
main() | [] |
2024-01-10 | cczhong11/MyAssistant | Tool~GPTTool.py | import os
from Tool.BaseTool import BaseTool
import requests
import openai
class GPTTool(BaseTool):
def __init__(self, token):
super(GPTTool, self).__init__("chatbot")
self.client = openai.OpenAI(api_key=token)
def reply(self, message):
response = self.client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": message}]
)
return response.choices[0].message.content
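# Illustrative usage sketch; the API key below is a placeholder.
#
#   tool = GPTTool(token="sk-...")
#   print(tool.reply("Summarize today's agenda in one sentence."))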
| [] |
2024-01-10 | BIT-SYS/gem5-spm-module | src~cpu~BaseCPU.py | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
# Andreas Hansson
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
from ClockDomain import *
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
from AlphaISA import AlphaISA
isa_class = AlphaISA
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
from SparcISA import SparcISA
isa_class = SparcISA
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
from X86ISA import X86ISA
isa_class = X86ISA
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
from MipsISA import MipsISA
isa_class = MipsISA
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB
from ArmInterrupts import ArmInterrupts
from ArmISA import ArmISA
isa_class = ArmISA
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
from PowerISA import PowerISA
isa_class = PowerISA
class BaseCPU(MemObject):
type = 'BaseCPU'
abstract = True
cxx_header = "cpu/base.hh"
@classmethod
def export_methods(cls, code):
code('''
void switchOut();
void takeOverFrom(BaseCPU *cpu);
bool switchedOut();
void flushTLBs();
Counter totalInsts();
void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
''')
@classmethod
def memory_mode(cls):
"""Which memory mode does this CPU require?"""
return 'invalid'
@classmethod
def require_caches(cls):
"""Does the CPU model require caches?
Some CPU models might make assumptions that require them to
have caches.
"""
return False
@classmethod
def support_take_over(cls):
"""Does the CPU model support CPU takeOverFrom?"""
return False
def takeOverFrom(self, old_cpu):
self._ccObject.takeOverFrom(old_cpu._ccObject)
system = Param.System(Parent.any, "system object")
cpu_id = Param.Int(-1, "CPU identifier")
numThreads = Param.Unsigned(1, "number of HW thread contexts")
function_trace = Param.Bool(False, "Enable function trace")
function_trace_start = Param.Tick(0, "Tick to start function trace")
checker = Param.BaseCPU(NULL, "checker CPU")
do_checkpoint_insts = Param.Bool(True,
"enable checkpoint pseudo instructions")
do_statistics_insts = Param.Bool(True,
"enable statistics pseudo instructions")
profile = Param.Latency('0ns', "trace the kernel stack")
do_quiesce = Param.Bool(True, "enable quiesce instructions")
workload = VectorParam.Process([], "processes to run")
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
interrupts = Param.SparcInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.SparcISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
interrupts = Param.AlphaInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.AlphaISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
isa = VectorParam.X86ISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
interrupts = Param.MipsInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.MipsISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
interrupts = Param.ArmInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.ArmISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
interrupts = Param.PowerInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.PowerISA([ isa_class() ], "ISA instance")
else:
print "Don't know what TLB to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
max_insts_all_threads = Param.Counter(0,
"terminate when all threads have reached this inst count")
max_insts_any_thread = Param.Counter(0,
"terminate when any thread reaches this inst count")
simpoint_start_insts = VectorParam.Counter([],
"starting instruction counts of simpoints")
max_loads_all_threads = Param.Counter(0,
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
progress_interval = Param.Frequency('0Hz',
"frequency to print out the progress message")
switched_out = Param.Bool(False,
"Leave the CPU switched out after startup (used when switching " \
"between CPU models)")
tracer = Param.InstTracer(default_tracer, "Instruction tracer")
icache_port = MasterPort("Instruction Port")
dcache_port = MasterPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
_uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
_uncached_master_ports += ["interrupts.int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
self.interrupts = SparcInterrupts()
elif buildEnv['TARGET_ISA'] == 'alpha':
self.interrupts = AlphaInterrupts()
elif buildEnv['TARGET_ISA'] == 'x86':
self.apic_clk_domain = DerivedClockDomain(clk_domain =
Parent.clk_domain,
clk_divider = 16)
self.interrupts = X86LocalApic(clk_domain = self.apic_clk_domain,
pio_addr=0x2000000000000000)
_localApic = self.interrupts
elif buildEnv['TARGET_ISA'] == 'mips':
self.interrupts = MipsInterrupts()
elif buildEnv['TARGET_ISA'] == 'arm':
self.interrupts = ArmInterrupts()
elif buildEnv['TARGET_ISA'] == 'power':
self.interrupts = PowerInterrupts()
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
def connectCachedPorts(self, bus):
for p in self._cached_ports:
exec('self.%s = bus.slave' % p)
def connectUncachedPorts(self, bus):
for p in self._uncached_slave_ports:
exec('self.%s = bus.master' % p)
for p in self._uncached_master_ports:
exec('self.%s = bus.slave' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
if not uncached_bus:
uncached_bus = cached_bus
self.connectUncachedPorts(uncached_bus)
def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None):
self.icache = ic
self.dcache = dc
self.icache_port = ic.cpu_side
self.dcache_port = dc.cpu_side
self._cached_ports = ['icache.mem_side', 'dcache.mem_side']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
# Set a width of 32 bytes (256-bits), which is four times that
# of the default bus. The clock of the CPU is inherited by
# default.
self.toL2Bus = CoherentBus(width = 32)
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
self.toL2Bus.master = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
def createThreads(self):
self.isa = [ isa_class() for i in xrange(self.numThreads) ]
if self.checker != NULL:
self.checker.createThreads()
def addCheckerCpu(self):
pass
| [] |
2024-01-10 | jaykchen/splitter | text-splitter.py | from typing import List
from pygments.lexers import guess_lexer
from pygments.util import ClassNotFound
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoTokenizer
import re
def detect_language(code: str):
try:
lexer = guess_lexer(code)
return lexer.name
except ClassNotFound:
return "Unknown language"
def huggingface_tokenizer_length(text: str, tokenizer) -> int:
tokenized_text = tokenizer(text, truncation=True, max_length=512)["input_ids"]
return len(tokenized_text)
# Initialize the tokenizer once
model_name = "bert-large-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Define the chunk size and overlap
chunk_size = 2000
chunk_overlap = 5
class LanguageAwareTextSplitter(RecursiveCharacterTextSplitter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language_separators = {
"Python": ["\n\n", "\n", " ", ""],
"Rust": ["\nfn ", "\nconst ", "\nlet ", "\nif ", "\nwhile ", "\nfor ", "\nloop ", "\nmatch ", "\nconst ", "\n\n", "\n", " ", ""],
"JS": [
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
"\n\n",
"\n",
" ",
"",
],
"SHELL": [
"\nfunction ",
"\nif ",
"\nfor ",
"\nwhile ",
"\ncase ",
"\n\n",
"\n",
" ",
"",
],
"MARKDOWN": [
"\n#{1,6} ",
"```\n",
"\n\\*\\*\\*+\n",
"\n---+\n",
"\n___+\n",
"\n\n",
"\n",
" ",
"",
],
"default": ["\n\n", "\n", " ", ""]
}
def split_text(self, text: str) -> List[str]:
chunks = []
current_chunk = ""
# Define a regex pattern that finds code blocks and the text immediately before and after them
code_block_pattern = r"(.*?\n\n)?(```.*?```)(\n\n.*?)?"
pattern = re.compile(code_block_pattern, re.DOTALL)
# Initialize the start index of the next search
start_idx = 0
# Search for the next code block along with its surrounding text
while True:
match = pattern.search(text, start_idx)
if not match:
break
# Extract text segments before, the code block itself, and after
before, code_block, after = match.groups()
before = before or "" # Ensure 'before' is a string if it's None
after = after or "" # Ensure 'after' is a string if it's None
# Accumulate 'before' text and check for chunk splitting
if before:
current_chunk = self._accumulate_and_maybe_split(current_chunk + before, chunks)
# Always keep the code block in the same chunk
current_chunk += code_block
current_chunk = self._accumulate_and_maybe_split(current_chunk, chunks)
# Accumulate 'after' text and check for chunk splitting
if after:
current_chunk = self._accumulate_and_maybe_split(current_chunk + after, chunks)
# Update the start index for the next search
start_idx = match.end()
# Append any remaining text after the last code block
remaining_text = text[start_idx:]
if remaining_text.strip():
current_chunk = self._accumulate_and_maybe_split(current_chunk + remaining_text, chunks)
# Add the last chunk if it's not empty
if current_chunk:
chunks.append(current_chunk)
return chunks
def _accumulate_and_maybe_split(self, text, chunks):
if huggingface_tokenizer_length(text, tokenizer) > self._chunk_size:
language = detect_language(text)
self.separators = self.language_separators.get(language, self.language_separators["default"])
chunk_splits = super().split_text(text)
# Add all but the last piece to chunks
chunks.extend(chunk_splits[:-1])
# Start the next chunk with the last piece
return chunk_splits[-1]
return text
# Create the text splitter
splitter = LanguageAwareTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=lambda text: huggingface_tokenizer_length(text, tokenizer),
)
def append_to_file(file_path, text):
with open(file_path, 'a') as file:
file.write(text)
# Function to read text from a file
def read_file(file_path):
with open(file_path, 'r') as file:
return file.read()
# Define the path to the text file
file_path = "/Users/jichen/Projects/rust-bert/splitter/book.txt"
file_path = "/Users/jichen/Projects/rust-bert/splitter/k8s.md"
# Read the text from the file
text = read_file(file_path)
# Split the text
splits = splitter.split_text(text)
# Print the splits
for idx, split in enumerate(splits, start=1):
append_to_file("splits-detect-truncate-380-k8s.txt", f"Split {idx}:\n{split}\n\n")
| [] |
2024-01-10 | tdwebdesign/thomasd9e | fantasy_news~admin.py | from django.contrib import admin
from django.utils.text import slugify
from .models import GameRecap
from .services import FantasyLeague
from blog.models import Post
from accounts.models import CustomUser
import markdown
import os
import openai
import spacy
# Constants and initializations
league = FantasyLeague("985036837548855296")
nlp = spacy.load("en_core_web_sm")
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_prompt_template(owned_players, recap_body):
return f"""
Provide a summary with a touch of humor of the following game recap, structured with the title
'Fantasy Football Highlights: <game score>'. <game score> is the score of the game found in the recap.
Example: "Giants 45 - Panthers 26". Follow this with three distinct sections titled 'Game Summary',
'McAlister's Deli Quick Bites', and 'Free Agent Spotlight'. In the 'McAlister's Deli Quick Bites' section,
give quick takes on these owned fantasy players: {', '.join(owned_players)}. Highlight a potential free
agent from the recap that had a notable performance in the 'Free Agent Spotlight' section. Ensure each
section is clearly separated and formatted with bold headings. Do not include any messages after the output.
###
{recap_body}
###
"""
def extract_owned_players(game_recap_body):
doc = nlp(game_recap_body)
player_names = {entity.text for entity in doc.ents if entity.label_ == "PERSON"}
return [
f"{player} - {league.get_owner_for_player(player)}"
for player in player_names
if league.get_owner_for_player(player)
]
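# Note: extract_owned_players() returns entries shaped like
# "<player name> - <fantasy owner>", e.g. ["Player One - Team Alpha"],
# with names that have no owner in the league filtered out.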
def create_post_from_response(response):
message = response["choices"][0]["message"]["content"]
lines = message.split("\n")
title = lines[0]
slug = slugify(title)
message = "\n".join(lines[1:])
html_message = markdown.markdown(message)
user = CustomUser.objects.get(username="AI Writer")
post = Post(
title=title, slug=slug, content=html_message, author=user, post_status="draft"
)
post.save()
def process_game_recap(game_recap):
owned_players = extract_owned_players(game_recap.body)
print(owned_players)
prompt = generate_prompt_template(owned_players, game_recap.body)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
temperature=1,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
create_post_from_response(response)
def process_selected_recaps(modeladmin, request, queryset):
for recap in queryset:
process_game_recap(recap)
recap.processed = True
recap.save()
@admin.register(GameRecap)
class GameRecapAdmin(admin.ModelAdmin):
list_display = ["title", "date", "processed"]
actions = [process_selected_recaps]
| [] |
2024-01-10 | HoagyC/sparse_coding | interpret.py | import argparse
import asyncio
import copy
import importlib
import json
import multiprocessing as mp
import os
import pickle
import sys
from datetime import datetime
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from baukit import Trace
from datasets import load_dataset
from transformer_lens import HookedTransformer
from activation_dataset import check_use_baukit, make_tensor_name
from config import BaseArgs, InterpArgs, InterpGraphArgs
from autoencoders.learned_dict import LearnedDict
# set OPENAI_API_KEY environment variable from secrets.json['openai_key']
# needs to be done before importing openai interp bits
with open("secrets.json") as f:
secrets = json.load(f)
os.environ["OPENAI_API_KEY"] = secrets["openai_key"]
mp.set_start_method("spawn", force=True)
from neuron_explainer.activations.activation_records import \
calculate_max_activation
from neuron_explainer.activations.activations import (
ActivationRecord, ActivationRecordSliceParams, NeuronId, NeuronRecord)
from neuron_explainer.explanations.calibrated_simulator import \
UncalibratedNeuronSimulator
from neuron_explainer.explanations.explainer import \
TokenActivationPairExplainer
from neuron_explainer.explanations.prompt_builder import PromptFormat
from neuron_explainer.explanations.scoring import (
aggregate_scored_sequence_simulations, simulate_and_score)
from neuron_explainer.explanations.simulator import ExplanationNeuronSimulator
from neuron_explainer.fast_dataclasses import loads
EXPLAINER_MODEL_NAME = "gpt-4" # "gpt-3.5-turbo"
SIMULATOR_MODEL_NAME = "text-davinci-003"
OPENAI_MAX_FRAGMENTS = 50000
OPENAI_FRAGMENT_LEN = 64
OPENAI_EXAMPLES_PER_SPLIT = 5
N_SPLITS = 4
TOTAL_EXAMPLES = OPENAI_EXAMPLES_PER_SPLIT * N_SPLITS
REPLACEMENT_CHAR = "�"
MAX_CONCURRENT: Any = None
BASE_FOLDER = "/mnt/ssd-cluster/sweep_interp"
# Replaces the load_neuron function in neuron_explainer.activations.activations because we couldn't get blobfile to work
def load_neuron(
layer_index: Union[str, int],
neuron_index: Union[str, int],
dataset_path: str = "https://openaipublic.blob.core.windows.net/neuron-explainer/data/collated-activations",
) -> NeuronRecord:
"""Load the NeuronRecord for the specified neuron from OpenAI's original work with GPT-2."""
url = os.path.join(dataset_path, str(layer_index), f"{neuron_index}.json")
response = requests.get(url)
if response.status_code != 200:
raise ValueError(f"Neuron record not found at {url}.")
neuron_record = loads(response.content)
if not isinstance(neuron_record, NeuronRecord):
raise ValueError(f"Stored data incompatible with current version of NeuronRecord dataclass.")
return neuron_record
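# Illustrative usage sketch; the layer/neuron indices are arbitrary examples of
# records published in the public dataset referenced above.
#
#   record = load_neuron(layer_index=5, neuron_index=131)
#   print(len(record.most_positive_activation_records))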
def make_feature_activation_dataset(
model: HookedTransformer,
learned_dict: LearnedDict,
layer: int,
layer_loc: str,
device: str = "cpu",
n_fragments=OPENAI_MAX_FRAGMENTS,
max_features: int = 0, # number of features to store activations for, 0 for all
random_fragment=True, # used for debugging
):
"""
Takes a specified point of a model, and a dataset.
Returns a dataset which contains the activations of the model at that point,
for each fragment in the dataset, transformed into the feature space
"""
model.to(device)
model.eval()
learned_dict.to_device(device)
use_baukit = check_use_baukit(model.cfg.model_name)
if max_features:
feat_dim = min(max_features, learned_dict.n_feats)
else:
feat_dim = learned_dict.n_feats
sentence_dataset = load_dataset("openwebtext", split="train", streaming=True)
if model.cfg.model_name == "nanoGPT":
tokenizer_model = HookedTransformer.from_pretrained("gpt2", device=device)
else:
tokenizer_model = model
tensor_name = make_tensor_name(layer, layer_loc, model.cfg.model_name)
# make list of sentence, tokenization pairs
iter_dataset = iter(sentence_dataset)
# Make dataframe with columns for each feature, and rows for each sentence fragment
# each row should also have the full sentence, the current tokens and the previous tokens
n_thrown = 0
n_added = 0
batch_size = min(20, n_fragments)
fragment_token_ids_list = []
fragment_token_strs_list = []
activation_maxes_table = np.zeros((n_fragments, feat_dim), dtype=np.float16)
activation_data_table = np.zeros((n_fragments, feat_dim * OPENAI_FRAGMENT_LEN), dtype=np.float16)
with torch.no_grad():
while n_added < n_fragments:
fragments: List[torch.Tensor] = []
fragment_strs: List[str] = []
while len(fragments) < batch_size:
print(
f"Added {n_added} fragments, thrown {n_thrown} fragments\t\t\t\t\t\t",
end="\r",
)
sentence = next(iter_dataset)
# split the sentence into fragments
sentence_tokens = tokenizer_model.to_tokens(sentence["text"], prepend_bos=False).to(device)
n_tokens = sentence_tokens.shape[1]
                # get a random fragment from the sentence - only taking one fragment per sentence so examples aren't correlated
if random_fragment:
token_start = np.random.randint(0, n_tokens - OPENAI_FRAGMENT_LEN)
else:
token_start = 0
fragment_tokens = sentence_tokens[:, token_start : token_start + OPENAI_FRAGMENT_LEN]
token_strs = tokenizer_model.to_str_tokens(fragment_tokens[0])
if REPLACEMENT_CHAR in token_strs:
n_thrown += 1
continue
fragment_strs.append(token_strs)
fragments.append(fragment_tokens)
tokens = torch.cat(fragments, dim=0)
assert tokens.shape == (batch_size, OPENAI_FRAGMENT_LEN), tokens.shape
# breakpoint()
if use_baukit:
with Trace(model, tensor_name) as ret:
_ = model(tokens)
mlp_activation_data = ret.output.to(device)
mlp_activation_data = nn.functional.gelu(mlp_activation_data)
else:
_, cache = model.run_with_cache(tokens)
mlp_activation_data = cache[tensor_name].to(device)
for i in range(batch_size):
fragment_tokens = tokens[i : i + 1, :]
activation_data = mlp_activation_data[i : i + 1, :].squeeze(0)
token_ids = fragment_tokens[0].tolist()
feature_activation_data = learned_dict.encode(activation_data)
feature_activation_maxes = torch.max(feature_activation_data, dim=0)[0]
activation_maxes_table[n_added, :] = feature_activation_maxes.cpu().numpy()[:feat_dim]
feature_activation_data = feature_activation_data.cpu().numpy()[:, :feat_dim]
activation_data_table[n_added, :] = feature_activation_data.flatten()
fragment_token_ids_list.append(token_ids)
fragment_token_strs_list.append(fragment_strs[i])
n_added += 1
if n_added >= n_fragments:
break
print(f"Added {n_added} fragments, thrown {n_thrown} fragments")
# Now we build the dataframe from the numpy arrays and the lists
print(f"Making dataframe from {n_added} fragments")
df = pd.DataFrame()
df["fragment_token_ids"] = fragment_token_ids_list
df["fragment_token_strs"] = fragment_token_strs_list
maxes_column_names = [f"feature_{i}_max" for i in range(feat_dim)]
activations_column_names = [
f"feature_{i}_activation_{j}" for j in range(OPENAI_FRAGMENT_LEN) for i in range(feat_dim)
] # nested for loops are read left to right
assert feature_activation_data.shape == (OPENAI_FRAGMENT_LEN, feat_dim)
df = pd.concat([df, pd.DataFrame(activation_maxes_table, columns=maxes_column_names)], axis=1)
df = pd.concat(
[df, pd.DataFrame(activation_data_table, columns=activations_column_names)],
axis=1,
)
print(f"Threw away {n_thrown} fragments, made {len(df)} fragments")
return df
def get_df(
feature_dict: LearnedDict,
model_name: str,
layer: int,
layer_loc: str,
n_feats: int,
save_loc: str,
device: str,
force_refresh: bool = False,
) -> pd.DataFrame:
# Load feature dict
feature_dict.to_device(device)
df_loc = os.path.join(save_loc, f"activation_df.hdf")
reload_data = True
if os.path.exists(df_loc) and not force_refresh:
start_time = datetime.now()
base_df = pd.read_hdf(df_loc)
print(f"Loaded dataset in {datetime.now() - start_time}")
# Check that the dataset has enough features saved
if f"feature_{n_feats - 1}_activation_0" in base_df.keys():
reload_data = False
else:
print("Dataset does not have enough features, remaking")
if reload_data:
model = HookedTransformer.from_pretrained(model_name, device=device)
base_df = make_feature_activation_dataset(
model,
learned_dict=feature_dict,
layer=layer,
layer_loc=layer_loc,
device=device,
max_features=n_feats,
)
# save the dataset, saving each column separately so that we can retrive just the columns we want later
print(f"Saving dataset to {df_loc}")
os.makedirs(save_loc, exist_ok=True)
base_df.to_hdf(df_loc, key="df", mode="w")
# save the autoencoder being investigated
os.makedirs(save_loc, exist_ok=True)
torch.save(feature_dict, os.path.join(save_loc, "autoencoder.pt"))
return base_df
async def interpret(base_df: pd.DataFrame, save_folder: str, n_feats_to_explain: int) -> None:
for feat_n in range(0, n_feats_to_explain):
if os.path.exists(os.path.join(save_folder, f"feature_{feat_n}")):
print(f"Feature {feat_n} already exists, skipping")
continue
activation_col_names = [f"feature_{feat_n}_activation_{i}" for i in range(OPENAI_FRAGMENT_LEN)]
read_fields = [
"fragment_token_strs",
f"feature_{feat_n}_max",
*activation_col_names,
]
# check that the dataset has the required columns
if not all([field in base_df.columns for field in read_fields]):
print(f"Dataset does not have all required columns for feature {feat_n}, skipping")
continue
df = base_df[read_fields].copy()
sorted_df = df.sort_values(by=f"feature_{feat_n}_max", ascending=False)
sorted_df = sorted_df.head(TOTAL_EXAMPLES)
top_activation_records = []
for i, row in sorted_df.iterrows():
top_activation_records.append(
ActivationRecord(
row["fragment_token_strs"],
[row[f"feature_{feat_n}_activation_{j}"] for j in range(OPENAI_FRAGMENT_LEN)],
)
)
random_activation_records: List[ActivationRecord] = []
# Adding random fragments
# random_df = df.sample(n=TOTAL_EXAMPLES)
# for i, row in random_df.iterrows():
# random_activation_records.append(ActivationRecord(row["fragment_token_strs"], [row[f"feature_{feat_n}_activation_{j}"] for j in range(OPENAI_FRAGMENT_LEN)]))
# making sure that the have some variation in each of the features, though need to be careful that this doesn't bias the results
random_ordering = torch.randperm(len(df)).tolist()
skip_feature = False
while len(random_activation_records) < TOTAL_EXAMPLES:
try:
i = random_ordering.pop()
except IndexError:
skip_feature = True
break
# if there are no activations for this fragment, skip it
if df.iloc[i][f"feature_{feat_n}_max"] == 0:
continue
random_activation_records.append(
ActivationRecord(
df.iloc[i]["fragment_token_strs"],
[df.iloc[i][f"feature_{feat_n}_activation_{j}"] for j in range(OPENAI_FRAGMENT_LEN)],
)
)
if skip_feature:
# Add placeholder folder so that we don't try to recompute this feature
os.makedirs(os.path.join(save_folder, f"feature_{feat_n}"), exist_ok=True)
print(f"Skipping feature {feat_n} due to lack of activating examples")
continue
neuron_id = NeuronId(layer_index=2, neuron_index=feat_n)
neuron_record = NeuronRecord(
neuron_id=neuron_id,
random_sample=random_activation_records,
most_positive_activation_records=top_activation_records,
)
slice_params = ActivationRecordSliceParams(n_examples_per_split=OPENAI_EXAMPLES_PER_SPLIT)
train_activation_records = neuron_record.train_activation_records(slice_params)
valid_activation_records = neuron_record.valid_activation_records(slice_params)
explainer = TokenActivationPairExplainer(
model_name=EXPLAINER_MODEL_NAME,
prompt_format=PromptFormat.HARMONY_V4,
max_concurrent=MAX_CONCURRENT,
)
explanations = await explainer.generate_explanations(
all_activation_records=train_activation_records,
max_activation=calculate_max_activation(train_activation_records),
num_samples=1,
)
assert len(explanations) == 1
explanation = explanations[0]
print(f"Feature {feat_n}, {explanation=}")
# Simulate and score the explanation.
format = PromptFormat.HARMONY_V4 if SIMULATOR_MODEL_NAME == "gpt-3.5-turbo" else PromptFormat.INSTRUCTION_FOLLOWING
simulator = UncalibratedNeuronSimulator(
ExplanationNeuronSimulator(
SIMULATOR_MODEL_NAME,
explanation,
max_concurrent=MAX_CONCURRENT,
prompt_format=format,
)
)
scored_simulation = await simulate_and_score(simulator, valid_activation_records)
score = scored_simulation.get_preferred_score()
assert len(scored_simulation.scored_sequence_simulations) == 10
top_only_score = aggregate_scored_sequence_simulations(
scored_simulation.scored_sequence_simulations[:5]
).get_preferred_score()
random_only_score = aggregate_scored_sequence_simulations(
scored_simulation.scored_sequence_simulations[5:]
).get_preferred_score()
print(
f"Feature {feat_n}, score={score:.2f}, top_only_score={top_only_score:.2f}, random_only_score={random_only_score:.2f}"
)
feature_name = f"feature_{feat_n}"
feature_folder = os.path.join(save_folder, feature_name)
os.makedirs(feature_folder, exist_ok=True)
pickle.dump(
scored_simulation,
open(os.path.join(feature_folder, "scored_simulation.pkl"), "wb"),
)
pickle.dump(neuron_record, open(os.path.join(feature_folder, "neuron_record.pkl"), "wb"))
# write a file with the explanation and the score
with open(os.path.join(feature_folder, "explanation.txt"), "w") as f:
f.write(
f"{explanation}\nScore: {score:.2f}\nExplainer model: {EXPLAINER_MODEL_NAME}\nSimulator model: {SIMULATOR_MODEL_NAME}\n"
)
f.write(f"Top only score: {top_only_score:.2f}\n")
f.write(f"Random only score: {random_only_score:.2f}\n")
def run(dict: LearnedDict, cfg: InterpArgs):
assert cfg.df_n_feats >= cfg.n_feats_explain
df = get_df(
feature_dict=dict,
model_name=cfg.model_name,
layer=cfg.layer,
layer_loc=cfg.layer_loc,
n_feats=cfg.df_n_feats,
save_loc=cfg.save_loc,
device=cfg.device,
)
asyncio.run(interpret(df, cfg.save_loc, n_feats_to_explain=cfg.n_feats_explain))
def get_score(lines: List[str], mode: str):
if mode == "top":
return float(lines[-3].split(" ")[-1])
elif mode == "random":
return float(lines[-2].split(" ")[-1])
elif mode == "top_random":
score_line = [line for line in lines if "Score: " in line][0]
return float(score_line.split(" ")[1])
else:
raise ValueError(f"Unknown mode: {mode}")
def run_folder(cfg: InterpArgs):
base_folder = cfg.load_interpret_autoencoder
all_encoders = os.listdir(cfg.load_interpret_autoencoder)
all_encoders = [x for x in all_encoders if (x.endswith(".pt") or x.endswith(".pkl"))]
print(f"Found {len(all_encoders)} encoders in {cfg.load_interpret_autoencoder}")
for i, encoder in enumerate(all_encoders):
print(f"Running encoder {i} of {len(all_encoders)}: {encoder}")
learned_dict = torch.load(os.path.join(base_folder, encoder), map_location=torch.device(cfg.device))
cfg.save_loc = os.path.join(BASE_FOLDER, encoder)
run(learned_dict, cfg)
def make_tag_name(hparams: Dict) -> str:
tag = ""
if "tied" in hparams.keys():
tag += f"tied_{hparams['tied']}"
if "dict_size" in hparams.keys():
tag += f"dict_size_{hparams['dict_size']}"
if "l1_alpha" in hparams.keys():
tag += f"l1_alpha_{hparams['l1_alpha']:.2}"
if "bias_decay" in hparams.keys():
tag += "0.0" if hparams["bias_decay"] == 0 else f"{hparams['bias_decay']:.1}"
return tag
def run_from_grouped(cfg: InterpArgs, results_loc: str):
"""
Run autointerpretation across a file of learned dicts as outputted by big_sweep.py or similar.
Expects results_loc to a .pt file containing a list of tuples of (learned_dict, hparams_dict)
"""
# First, read in the results file
results = torch.load(results_loc)
time_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(os.path.join("auto_interp_results", time_str), exist_ok=True)
# Now split the results out into separate files
for learned_dict, hparams_dict in results:
filename = make_tag_name(hparams_dict) + ".pt"
torch.save(learned_dict, os.path.join("auto_interp_results", time_str, filename))
cfg.load_interpret_autoencoder = os.path.join("auto_interp_results", time_str)
run_folder(cfg)
def read_transform_scores(transform_loc: str, score_mode: str, verbose: bool = False) -> Tuple[List[int], List[float]]:
transform_scores = []
transform_ndxs = []
# list all the features by looking for folders
feat_folders = [x for x in os.listdir(transform_loc) if x.startswith("feature_")]
if len(feat_folders) == 0:
return [], []
transform = transform_loc.split('/')[-1]
print(f"{transform=} {len(feat_folders)=}")
for feature_folder in feat_folders:
feature_ndx = int(feature_folder.split("_")[1])
folder = os.path.join(transform_loc, feature_folder)
if not os.path.exists(folder):
continue
if not os.path.exists(os.path.join(folder, "explanation.txt")):
continue
explanation_text = open(os.path.join(folder, "explanation.txt")).read()
# score should be on the second line but if explanation had newlines could be on the third or below
# score = float(explanation_text.split("\n")[1].split(" ")[1])
lines = explanation_text.split("\n")
score = get_score(lines, score_mode)
if verbose:
print(f"{feature_ndx=}, {transform=}, {score=}")
transform_scores.append(score)
transform_ndxs.append(feature_ndx)
return transform_ndxs, transform_scores
def read_scores(results_folder: str, score_mode: str = "top") -> Dict[str, Tuple[List[int], List[float]]]:
assert score_mode in ["top", "random", "top_random"]
scores: Dict[str, Tuple[List[int], List[float]]] = {}
transforms = os.listdir(results_folder)
transforms = [transform for transform in transforms if os.path.isdir(os.path.join(results_folder, transform))]
if "sparse_coding" in transforms:
transforms.remove("sparse_coding")
transforms = ["sparse_coding"] + transforms
for transform in transforms:
transform_ndxs, transform_scores = read_transform_scores(os.path.join(results_folder, transform), score_mode)
if len(transform_ndxs) > 0:
scores[transform] = (transform_ndxs, transform_scores)
return scores
def parse_folder_name(folder_name: str) -> Tuple[str, str, int, float, str]:
"""
Parse the folder name to get the hparams
"""
# examples: tied_mlpout_l1_r2, tied_residual_l5_r8
tied, layer_loc, layer_str, ratio_str, *extras = folder_name.split("_")
if extras:
extra_str = "_".join(extras)
else:
extra_str = ""
layer = int(layer_str[1:])
ratio = float(ratio_str[1:])
if ratio == 0:
ratio = 0.5
return tied, layer_loc, layer, ratio, extra_str
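# Editor's note (added): worked examples of the folder-name parsing above, using
# the example names from the docstring (a ratio of 0 is remapped to 0.5):
# >>> parse_folder_name("tied_residual_l5_r8")
# ('tied', 'residual', 5, 8.0, '')
# >>> parse_folder_name("tied_mlpout_l1_r2")
# ('tied', 'mlpout', 1, 2.0, '')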
def run_list_of_learned_dicts(dicts: List[Tuple[str, LearnedDict]], cfg):
"""
Run autointerpretation across a folder of learned dicts as outputted by big_sweep.py or similar, where the layer/layer_loc are the same.
"""
for name, dict in dicts:
print(f"Running {name}")
run(dict, cfg)
def worker(queue, device_id):
device = f"cuda:{device_id}"
while not queue.empty():
learned_dict, cfg = queue.get()
print(f"Running {cfg.save_loc}")
cfg.device = device
learned_dict.to_device(device)
run(learned_dict, cfg)
def interpret_across_baselines(n_gpus: int = 3):
baselines_dir = "/mnt/ssd-cluster/baselines"
save_dir = "/mnt/ssd-cluster/auto_interp_results/"
os.makedirs(save_dir, exist_ok=True)
base_cfg = InterpArgs()
if n_gpus > 1:
job_queue: mp.Queue = mp.Queue()
all_folders = os.listdir(baselines_dir)
for folder in all_folders:
layer_str, layer_loc = folder.split("_")
layer = int(layer_str[1:])
layer_baselines = os.listdir(os.path.join(baselines_dir, folder))
for baseline_file in layer_baselines:
cfg = copy.deepcopy(base_cfg)
cfg.layer = layer
cfg.layer_loc = layer_loc
cfg.save_loc = os.path.join(save_dir, folder, baseline_file[:-3])
cfg.n_feats_explain = 150
if not cfg.layer_loc == "residual":
continue
if "nmf" in baseline_file:
continue
learned_dict = torch.load(
os.path.join(baselines_dir, folder, baseline_file),
map_location=cfg.device,
)
print(f"{layer=}, {layer_loc=}, {baseline_file=}")
if n_gpus == 1:
run(learned_dict, cfg)
else:
job_queue.put((learned_dict, cfg))
if n_gpus > 1:
processes = [mp.Process(target=worker, args=(job_queue, i)) for i in range(n_gpus)]
for p in processes:
p.start()
for p in processes:
p.join()
def interpret_across_big_sweep(l1_val: float, n_gpus: int = 1):
base_cfg = InterpArgs()
base_dir = "/mnt/ssd-cluster/bigrun0308"
save_dir = "/mnt/ssd-cluster/auto_interp_results/"
n_chunks_training = 10
os.makedirs(save_dir, exist_ok=True)
all_folders = os.listdir(base_dir)
if n_gpus != 1:
job_queue: List[Tuple[Callable, InterpArgs]] = []
for folder in all_folders:
try:
tied, layer_loc, layer, ratio, extra_str = parse_folder_name(folder)
except:
continue
print(f"{tied}, {layer_loc=}, {layer=}, {ratio=}")
if layer_loc != "residual":
continue
if tied != "tied":
continue
if ratio != 2:
continue
if extra_str != "":
continue
cfg = copy.deepcopy(base_cfg)
autoencoders = torch.load(
os.path.join(base_dir, folder, f"_{n_chunks_training - 1}", "learned_dicts.pt"),
map_location=cfg.device,
)
# find ae with matching l1_val
matching_encoders = [ae for ae in autoencoders if abs(ae[1]["l1_alpha"] - l1_val) < 1e-4]
if not len(matching_encoders) == 1:
print(f"Found {len(matching_encoders)} matching encoders for {folder}")
matching_encoder = matching_encoders[0][0]
# save the learned dict
save_str = f"l{layer}_{layer_loc}/{tied}_r{ratio}_l1a{l1_val:.2}"
# os.makedirs(os.path.join(save_dir, save_str), exist_ok=True)
# torch.save(matching_encoder, os.path.join(save_dir, save_str, "learned_dict.pt"))
# run the interpretation
cfg.load_interpret_autoencoder = os.path.join(save_dir, save_str, "learned_dict.pt")
cfg.layer = layer
cfg.layer_loc = layer_loc
cfg.save_loc = os.path.join(save_dir, save_str)
cfg.n_feats_explain = 150
if n_gpus == 1:
run(matching_encoder, cfg)
else:
cfg.device = f"cuda:{len(job_queue) % n_gpus}"
job_queue.append((matching_encoder, cfg))
if n_gpus > 1:
with mp.Pool(n_gpus) as p:
p.starmap(run, job_queue)
def interpret_across_chunks(l1_val: float, n_gpus: int = 1):
base_cfg = InterpArgs()
base_dir = "/mnt/ssd-cluster/longrun2408"
save_dir = "/mnt/ssd-cluster/auto_interp_results_overtime/"
os.makedirs(save_dir, exist_ok=True)
all_folders = os.listdir(base_dir)
if n_gpus != 1:
job_queue: List[Tuple[Callable, InterpArgs]] = []
for folder in all_folders:
for n_chunks in [1, 4, 16, 32]:
tied, layer_loc, layer, ratio, extra_str = parse_folder_name(folder)
if layer != base_cfg.layer:
continue
cfg = copy.deepcopy(base_cfg)
autoencoders = torch.load(
os.path.join(base_dir, folder, f"_{n_chunks - 1}", "learned_dicts.pt"),
map_location=cfg.device,
)
# find ae with matching l1_val
matching_encoders = [ae for ae in autoencoders if abs(ae[1]["l1_alpha"] - l1_val) < 1e-4]
if not len(matching_encoders) == 1:
print(f"Found {len(matching_encoders)} matching encoders for {folder}")
matching_encoder = matching_encoders[0][0]
# save the learned dict
save_str = f"l{layer}_{layer_loc}/{tied}_r{ratio}_nc{n_chunks}_l1a{l1_val:.2}"
os.makedirs(os.path.join(save_dir, save_str), exist_ok=True)
torch.save(matching_encoder, os.path.join(save_dir, save_str, "learned_dict.pt"))
# run the interpretation
cfg.load_interpret_autoencoder = os.path.join(save_dir, save_str, "learned_dict.pt")
cfg.layer = layer
cfg.layer_loc = layer_loc
cfg.save_loc = os.path.join(save_dir, save_str)
cfg.n_feats_explain = 100
if n_gpus == 1:
run(matching_encoder, cfg)
else:
cfg.device = f"cuda:{len(job_queue) % n_gpus}"
job_queue.append((matching_encoder, cfg))
if n_gpus > 1:
with mp.Pool(n_gpus) as p:
p.starmap(run, job_queue)
def read_results(activation_name: str, score_mode: str) -> None:
results_folder = os.path.join("/mnt/ssd-cluster/auto_interp_results", activation_name)
scores = read_scores(
results_folder, score_mode
) # Dict[str, Tuple[List[int], List[float]]], where the tuple is (feature_ndxs, scores)
if len(scores) == 0:
print(f"No scores found for {activation_name}")
return
transforms = scores.keys()
plt.clf() # clear the plot
# plot the scores as a violin plot
colors = [
"red",
"blue",
"green",
"orange",
"purple",
"pink",
"black",
"brown",
"cyan",
"magenta",
"grey",
]
# fix yrange from -0.2 to 0.6
plt.ylim(-0.2, 0.6)
# add horizontal grid lines every 0.1
plt.yticks(np.arange(-0.2, 0.6, 0.1))
plt.grid(axis="y", color="grey", linestyle="-", linewidth=0.5, alpha=0.3)
# first we need to get the scores into a list of lists
scores_list = [scores[transform][1] for transform in transforms]
# remove any transforms that have no scores
scores_list = [scores for scores in scores_list if len(scores) > 0]
violin_parts = plt.violinplot(scores_list, showmeans=False, showextrema=False)
for i, pc in enumerate(violin_parts["bodies"]):
pc.set_facecolor(colors[i % len(colors)])
pc.set_edgecolor(colors[i % len(colors)])
pc.set_alpha(0.3)
# add x labels
plt.xticks(np.arange(1, len(transforms) + 1), transforms, rotation=90)
# add standard errors around the means but don't plot the means
cis = [1.96 * np.std(scores[transform][1], ddof=1) / np.sqrt(len(scores[transform][1])) for transform in transforms]
for i, transform in enumerate(transforms):
plt.errorbar(
i + 1,
np.mean(scores[transform][1]),
yerr=cis[i],
fmt="o",
color=colors[i % len(colors)],
elinewidth=2,
capsize=20,
)
plt.title(f"{activation_name} {score_mode}")
plt.xlabel("Transform")
plt.ylabel("GPT-4-based interpretability score")
plt.xticks(rotation=90)
# and a thicker line at 0
plt.axhline(y=0, linestyle="-", color="black", linewidth=1)
plt.tight_layout()
save_path = os.path.join(results_folder, f"{score_mode}_means_and_violin.png")
print(f"Saving means and violin graph to {save_path}")
plt.savefig(save_path)
if __name__ == "__main__":
cfg: BaseArgs
if len(sys.argv) > 1 and sys.argv[1] == "read_results":
cfg = InterpGraphArgs()
if cfg.score_mode == "all":
score_modes = ["top", "random", "top_random"]
else:
score_modes = [cfg.score_mode]
base_path = "/mnt/ssd-cluster/auto_interp_results"
if cfg.run_all:
activation_names = [x for x in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, x))]
else:
activation_names = [f"{cfg.model_name.split('/')[-1]}_layer{cfg.layer}_{cfg.layer_loc}"]
for activation_name in activation_names:
for score_mode in score_modes:
read_results(activation_name, score_mode)
elif len(sys.argv) > 1 and sys.argv[1] == "run_group":
cfg = InterpArgs()
run_from_grouped(cfg, cfg.load_interpret_autoencoder)
elif len(sys.argv) > 1 and sys.argv[1] == "big_sweep":
sys.argv.pop(1)
# l1_val = 0.00018478
l1_val = 0.0008577 # 8e-4 in logspace(-4, -2, 16)
# l1_val = 0.00083768 # 8e-4 in logspace(-4, -2, 14)
# l1_val = 0.0007197 # 8e-4 in logspace(-4, -2, 8)
# l1_val = 1e-3
# l1_val = 0.000316 # early one for mlp??
interpret_across_big_sweep(l1_val)
elif len(sys.argv) > 1 and sys.argv[1] == "all_baselines":
sys.argv.pop(1)
interpret_across_baselines()
elif len(sys.argv) > 1 and sys.argv[1] == "chunks":
l1_val = 0.0007197 # 8e-4 in logspace(-4, -2, 8)
sys.argv.pop(1)
interpret_across_chunks(l1_val)
else:
cfg = InterpArgs()
if os.path.isdir(cfg.load_interpret_autoencoder):
run_folder(cfg)
else:
learned_dict = torch.load(cfg.load_interpret_autoencoder, map_location=cfg.device)
save_folder = f"/mnt/ssd-cluster/auto_interp_results/l{cfg.layer}_{cfg.layer_loc}"
cfg.save_loc = os.path.join(save_folder, cfg.load_interpret_autoencoder)
run(learned_dict, cfg)
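# Editor's note (added): the dispatch above selects a mode from sys.argv[1]; a
# hedged sketch of the expected invocations (the script name and any extra flags
# consumed by InterpArgs/InterpGraphArgs are assumptions, not from the source):
#   python interpret.py                  # interpret a single dict or a folder of dicts
#   python interpret.py read_results     # plot per-transform score distributions
#   python interpret.py run_group        # split a grouped learned_dicts.pt, then interpret each
#   python interpret.py big_sweep        # interpret sweep encoders at the hard-coded l1 value
#   python interpret.py all_baselines    # interpret the baseline dictionaries
#   python interpret.py chunks           # interpret checkpoints across training chunks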
| [] |
2024-01-10 | npow/langchain | langchain~chains~combine_documents~stuff.py | """Chain that combines documents by stuffing into context."""
from typing import Any, Dict, List, Optional, Tuple
from pydantic import Extra, Field, root_validator
from langchain.chains.combine_documents.base import (
BaseCombineDocumentsChain,
format_document,
)
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
def _get_default_document_prompt() -> PromptTemplate:
return PromptTemplate(input_variables=["page_content"], template="{page_content}")
class StuffDocumentsChain(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context."""
llm_chain: LLMChain
"""LLM wrapper to use after formatting documents."""
document_prompt: BasePromptTemplate = Field(
default_factory=_get_default_document_prompt
)
"""Prompt to use to format each document."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
document_separator: str = "\n\n"
"""The string with which to join the formatted documents"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
llm_chain_variables = values["llm_chain"].prompt.input_variables
if "document_variable_name" not in values:
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain_variables"
)
else:
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
# Format each document according to the prompt
doc_strings = [format_document(doc, self.document_prompt) for doc in docs]
# Join the documents together to put them in the prompt.
inputs = {
k: v
for k, v in kwargs.items()
if k in self.llm_chain.prompt.input_variables
}
inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
return inputs
def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
"""Get the prompt length by formatting the prompt."""
inputs = self._get_inputs(docs, **kwargs)
prompt = self.llm_chain.prompt.format(**inputs)
return self.llm_chain.llm.get_num_tokens(prompt)
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return self.llm_chain.predict(**inputs), {}
async def acombine_docs(
self, docs: List[Document], **kwargs: Any
) -> Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM."""
inputs = self._get_inputs(docs, **kwargs)
# Call predict on the LLM.
return await self.llm_chain.apredict(**inputs), {}
@property
def _chain_type(self) -> str:
return "stuff_documents_chain"
| [
"{page_content}"
] |
2024-01-10 | npow/langchain | langchain~tools~playwright~current_page.py | from __future__ import annotations
from typing import Type
from pydantic import BaseModel
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class CurrentWebPageTool(BaseBrowserTool):
name: str = "current_webpage"
description: str = "Returns the URL of the current page"
args_schema: Type[BaseModel] = BaseModel
def _run(self) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
return str(page.url)
async def _arun(self) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
return str(page.url)
| [
"Returns the URL of the current page"
] |
2024-01-10 | npow/langchain | langchain~tools~file_management~write.py | from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class WriteFileInput(BaseModel):
"""Input for WriteFileTool."""
file_path: str = Field(..., description="name of file")
text: str = Field(..., description="text to write to file")
append: bool = Field(
default=False, description="Whether to append to an existing file."
)
class WriteFileTool(BaseFileTool):
name: str = "write_file"
args_schema: Type[BaseModel] = WriteFileInput
description: str = "Write file to disk"
def _run(self, file_path: str, text: str, append: bool = False) -> str:
try:
write_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
try:
write_path.parent.mkdir(exist_ok=True, parents=False)
mode = "a" if append else "w"
with write_path.open(mode, encoding="utf-8") as f:
f.write(text)
return f"File written successfully to {file_path}."
except Exception as e:
return "Error: " + str(e)
async def _arun(self, file_path: str, text: str, append: bool = False) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
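# Editor's note (added): a minimal usage sketch, not part of the original module.
# It assumes BaseFileTool (imported above from .utils) accepts an optional
# root_dir that sandboxes get_relative_path; paths escaping it return the
# INVALID_PATH_TEMPLATE message instead of being written.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as _workspace:
        _tool = WriteFileTool(root_dir=_workspace)  # root_dir assumed from BaseFileTool
        print(_tool.run({"file_path": "notes/hello.txt", "text": "hello"}))
        # -> File written successfully to notes/hello.txt.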
| [
"Write file to disk"
] |
2024-01-10 | npow/langchain | langchain~tools~file_management~delete.py | import os
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class FileDeleteInput(BaseModel):
"""Input for DeleteFileTool."""
file_path: str = Field(..., description="Path of the file to delete")
class DeleteFileTool(BaseFileTool):
name: str = "file_delete"
args_schema: Type[BaseModel] = FileDeleteInput
description: str = "Delete a file"
def _run(self, file_path: str) -> str:
try:
file_path_ = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
if not file_path_.exists():
return f"Error: no such file or directory: {file_path}"
try:
os.remove(file_path_)
return f"File deleted successfully: {file_path}."
except Exception as e:
return "Error: " + str(e)
async def _arun(self, file_path: str) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
| [
"Delete a file"
] |
2024-01-10 | npow/langchain | langchain~tools~file_management~move.py | import shutil
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class FileMoveInput(BaseModel):
"""Input for MoveFileTool."""
source_path: str = Field(..., description="Path of the file to move")
destination_path: str = Field(..., description="New path for the moved file")
class MoveFileTool(BaseFileTool):
name: str = "move_file"
args_schema: Type[BaseModel] = FileMoveInput
description: str = "Move or rename a file from one location to another"
def _run(self, source_path: str, destination_path: str) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="destination_path_", value=destination_path_
)
if not source_path_.exists():
return f"Error: no such file or directory {source_path}"
try:
# shutil.move expects str args in 3.8
shutil.move(str(source_path_), destination_path_)
return f"File moved successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
async def _arun(self, source_path: str, destination_path: str) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
| [
"Move or rename a file from one location to another"
] |
2024-01-10 | npow/langchain | langchain~retrievers~document_compressors~chain_extract.py | """DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseLanguageModel, BaseOutputParser, Document
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
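# Editor's note (added): a minimal usage sketch, not part of the original module.
# It assumes the OpenAI LLM wrapper is available and configured; the default
# prompt and NoOutputParser defined above are used when no prompt is passed.
if __name__ == "__main__":
    from langchain.llms import OpenAI  # assumed available / configured

    _compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
    _docs = [
        Document(page_content="The Eiffel Tower is 330 m tall and stands in Paris."),
        Document(page_content="Bananas are a good source of potassium."),
    ]
    # Only the parts of each document relevant to the query are kept; documents
    # whose extraction comes back as NO_OUTPUT are dropped entirely.
    for _doc in _compressor.compress_documents(_docs, query="How tall is the Eiffel Tower?"):
        print(_doc.page_content)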
| [] |
2024-01-10 | npow/langchain | langchain~tools~file_management~copy.py | import shutil
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class FileCopyInput(BaseModel):
"""Input for CopyFileTool."""
source_path: str = Field(..., description="Path of the file to copy")
destination_path: str = Field(..., description="Path to save the copied file")
class CopyFileTool(BaseFileTool):
name: str = "copy_file"
args_schema: Type[BaseModel] = FileCopyInput
description: str = "Create a copy of a file in a specified location"
def _run(self, source_path: str, destination_path: str) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="destination_path", value=destination_path
)
try:
shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
return f"File copied successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
async def _arun(self, source_path: str, destination_path: str) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
| [
"Create a copy of a file in a specified location"
] |
2024-01-10 | npow/langchain | langchain~tools~playwright~navigate.py | from __future__ import annotations
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateToolInput(BaseModel):
"""Input for NavigateToolInput."""
url: str = Field(..., description="url to navigate to")
class NavigateTool(BaseBrowserTool):
name: str = "navigate_browser"
description: str = "Navigate a browser to the specified URL"
args_schema: Type[BaseModel] = NavigateToolInput
def _run(self, url: str) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
async def _arun(self, url: str) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
| [
"Navigate a browser to the specified URL"
] |
2024-01-10 | npow/langchain | langchain~tools~playwright~get_elements.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from pydantic import BaseModel, Field
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
"""Input for GetElementsTool."""
selector: str = Field(
...,
description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
)
attributes: List[str] = Field(
default_factory=lambda: ["innerText"],
description="Set of attributes to retrieve for each element",
)
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
class GetElementsTool(BaseBrowserTool):
name: str = "get_elements"
description: str = (
"Retrieve elements in the current web page matching the given CSS selector"
)
args_schema: Type[BaseModel] = GetElementsToolInput
def _run(self, selector: str, attributes: Sequence[str] = ["innerText"]) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
results = _get_elements(page, selector, attributes)
return json.dumps(results)
async def _arun(
self, selector: str, attributes: Sequence[str] = ["innerText"]
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
results = await _aget_elements(page, selector, attributes)
return json.dumps(results)
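# Editor's note (added): a minimal usage sketch, not part of the original module.
# It drives the tool with a plain Playwright sync browser (the sync_browser
# field comes from BaseBrowserTool); it assumes Chromium has been installed via
# `playwright install` and that example.com is reachable.
if __name__ == "__main__":
    from playwright.sync_api import sync_playwright

    from langchain.tools.playwright.navigate import NavigateTool

    with sync_playwright() as _p:
        _browser = _p.chromium.launch(headless=True)
        NavigateTool(sync_browser=_browser).run({"url": "https://example.com"})
        # Both tools share the browser's current page, so this reads the page loaded above.
        print(GetElementsTool(sync_browser=_browser).run(
            {"selector": "h1", "attributes": ["innerText"]}
        ))
        _browser.close()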
| [] |
2024-01-10 | npow/langchain | langchain~tools~playwright~click.py | from __future__ import annotations
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
"""Input for ClickTool."""
selector: str = Field(..., description="CSS selector for the element to click")
class ClickTool(BaseBrowserTool):
name: str = "click_element"
description: str = "Click on an element with the given CSS selector"
args_schema: Type[BaseModel] = ClickToolInput
def _run(self, selector: str) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
page.click(selector)
return f"Clicked element '{selector}'"
async def _arun(self, selector: str) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
await page.click(selector)
return f"Clicked element '{selector}'"
| [
"Click on an element with the given CSS selector"
] |
2024-01-10 | npow/langchain | langchain~agents~load_tools.py | # flake8: noqa
"""Load tools."""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.pal.base import PALChain
from langchain.llms.base import BaseLLM
from langchain.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities import ArxivAPIWrapper
from langchain.utilities.apify import ApifyWrapper
from langchain.utilities.bash import BashProcess
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"python_repl": _get_python_repl,
"requests": _get_tools_requests_get, # preserved for backwards compatability
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
}
def _get_pal_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-MATH",
description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.",
func=PALChain.from_math_prompt(llm).run,
)
def _get_pal_colored_objects(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-COLOR-OBJ",
description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.",
func=PALChain.from_colored_object_prompt(llm).run,
)
def _get_llm_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).run,
coroutine=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).arun,
)
def _get_open_meteo_api(llm: BaseLLM) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open Meteo API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLLM], BaseTool]] = {
"pal-math": _get_pal_math,
"pal-colored-objects": _get_pal_colored_objects,
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
)
return Tool(
name="Podcast API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return Tool(
name="Serper Search",
func=GoogleSerperAPIWrapper(**kwargs).run,
description="A low-cost Google Search API. Useful for when you need to answer questions about current events. Input should be a search query.",
)
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
_EXTRA_LLM_TOOLS: Dict[
str, Tuple[Callable[[Arg(BaseLLM, "llm"), KwArg(Any)], BaseTool], List[str]]
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key"]),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
}
def load_tools(
tool_names: List[str],
llm: Optional[BaseLLM] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Args:
tool_names: name of tools to load.
llm: Optional language model, may be needed to initialize certain tools.
callback_manager: Optional callback manager. If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
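# Editor's note (added): a minimal usage sketch, not part of the original module.
# It assumes the OpenAI LLM wrapper is available and configured; llm-math needs
# the llm argument, while ddg-search takes no extra kwargs (see the registries above).
if __name__ == "__main__":
    from langchain.llms import OpenAI  # assumed available / configured

    print(get_all_tool_names())
    _tools = load_tools(["llm-math", "ddg-search"], llm=OpenAI(temperature=0))
    for _tool in _tools:
        print(_tool.name, "-", _tool.description)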
| [] |
2024-01-10 | npow/langchain | langchain~tools~file_management~file_search.py | import fnmatch
import os
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class FileSearchInput(BaseModel):
"""Input for FileSearchTool."""
dir_path: str = Field(
default=".",
description="Subdirectory to search in.",
)
pattern: str = Field(
...,
description="Unix shell regex, where * matches everything.",
)
class FileSearchTool(BaseFileTool):
name: str = "file_search"
args_schema: Type[BaseModel] = FileSearchInput
description: str = (
"Recursively search for files in a subdirectory that match the regex pattern"
)
def _run(self, pattern: str, dir_path: str = ".") -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return "\n".join(matches)
else:
return f"No files found for pattern {pattern} in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
async def _arun(self, dir_path: str, pattern: str) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
| [] |
2024-01-10 | npow/langchain | langchain~tools~playwright~navigate_back.py | from __future__ import annotations
from typing import Type
from pydantic import BaseModel
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateBackTool(BaseBrowserTool):
"""Navigate back to the previous page in the browser history."""
name: str = "previous_webpage"
description: str = "Navigate back to the previous page in the browser history"
args_schema: Type[BaseModel] = BaseModel
def _run(self) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
async def _arun(self) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.go_back()
if response:
return (
f"Navigated back to the previous page with URL '{response.url}'."
f" Status code {response.status}"
)
else:
return "Unable to navigate back; no previous page in the history"
| [
"Navigate back to the previous page in the browser history"
] |
2024-01-10 | npow/langchain | langchain~tools~file_management~list_dir.py | import os
from typing import Type
from pydantic import BaseModel, Field
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileTool,
FileValidationError,
)
class DirectoryListingInput(BaseModel):
"""Input for ListDirectoryTool."""
dir_path: str = Field(default=".", description="Subdirectory to list.")
class ListDirectoryTool(BaseFileTool):
name: str = "list_directory"
args_schema: Type[BaseModel] = DirectoryListingInput
description: str = "List files and directories in a specified folder"
def _run(self, dir_path: str = ".") -> str:
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
try:
entries = os.listdir(dir_path_)
if entries:
return "\n".join(entries)
else:
return f"No files found in directory {dir_path}"
except Exception as e:
return "Error: " + str(e)
async def _arun(self, dir_path: str) -> str:
# TODO: Add aiofiles method
raise NotImplementedError
| [
"List files and directories in a specified folder"
] |
2024-01-10 | npow/langchain | langchain~tools~playwright~extract_text.py | from __future__ import annotations
from typing import Type
from pydantic import BaseModel, root_validator
from langchain.tools.playwright.base import BaseBrowserTool
from langchain.tools.playwright.utils import aget_current_page, get_current_page
class ExtractTextTool(BaseBrowserTool):
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = BaseModel
@root_validator
    def check_bs_import(cls, values: dict) -> dict:
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ValueError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(self) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
| [
"Extract all the text on the current webpage"
] |
2024-01-10 | vivaan2006/Test_time | create_widget_functions.py | from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtGui import QIcon, QPixmap, QFontMetrics
from PyQt5.QtWidgets import QSpacerItem, QGraphicsDropShadowEffect, QSizePolicy, QLabel, QListWidgetItem, QTabWidget, QTabBar, QStylePainter, QStyleOptionTab, QStyle, QComboBox, QVBoxLayout, QHBoxLayout, QScrollArea, QTextEdit, QLineEdit, QPushButton, QWidget, QListWidget
import openai
openai.api_key = "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" # put the key here remove before pushing
class create_QComboBox:
def __init__(self, container, x_coordinate, y_coordinate, width, length):
# Creates and associates QComboBox to specified container
if container == "points_tab":
self.QComboBox = QtWidgets.QComboBox(self.points_tab)
# Geometry of QComboBox is specified by the passed function parameters
self.QComboBox.setGeometry(QtCore.QRect(x_coordinate, y_coordinate, width, length))
return self.QComboBox
class create_QCheckBox():
def __init__(self, container, x_coordinate, y_coordinate, width, length):
if container == "dashboard_tab":
self.QCheckBox = QtWidgets.QCheckBox(self.dashboard_tab)
elif container == "upcoming_events_tab":
self.QCheckBox = QtWidgets.QCheckBox(self.upcoming_events_tab)
elif container == "event":
self.QCheckBox = QtWidgets.QCheckBox(self.event_object)
self.QCheckBox.resize(width, length)
self.QCheckBox.move(x_coordinate, y_coordinate)
return self.QCheckBox
class create_QCalendar():
def __init__(self, container, x_coordinate, y_coordinate, width, length):
if container == "upcoming_events_tab":
self.QCalender = QtWidgets.QCalendarWidget(self.upcoming_events_tab)
elif container == "admin_events_tab":
self.QCalender = QtWidgets.QCalendarWidget(self.admin_events_tab)
self.QCalender.setGeometry(x_coordinate, y_coordinate, width, length)
return self.QCalender
class create_QLabel():
def __init__(self, container, object_name, text, x_coordinate, y_coordinate, width, length):
# Creates and associates QLabel to specified container
if container == "login_widget_container":
self.QLabel = QtWidgets.QLabel(self.login_widget_container)
elif container == "central_widget":
self.QLabel = QtWidgets.QLabel(self.central_widget)
elif container == "dashboard_tab":
self.QLabel = QtWidgets.QLabel(self.dashboard_tab)
elif container == "upcoming_events_tab":
self.QLabel = QtWidgets.QLabel(self.upcoming_events_tab)
elif container == "points_tab":
self.QLabel = QtWidgets.QLabel(self.points_tab)
elif container == "rewards_tab":
self.QLabel = QtWidgets.QLabel(self.rewards_tab)
elif container == "student_profile_tab":
self.QLabel = QtWidgets.QLabel(self.student_profile_tab)
elif container == "slideshow_description_groupbox":
self.QLabel = QtWidgets.QLabel(self.slideshow_description_groupbox)
elif container == "event":
self.QLabel = QtWidgets.QLabel(self.event_object)
elif container == "report_frame":
self.QLabel = QtWidgets.QLabel(self.report_frame)
elif container == "forgot_password_frame":
self.QLabel = QtWidgets.QLabel(self.forgot_password_frame)
elif container == "student_account_frame":
self.QLabel = QtWidgets.QLabel(self.student_account_frame)
# Administrator
elif container == "admin_dashboard_tab":
self.QLabel = QtWidgets.QLabel(self.admin_dashboard_tab)
elif container == "admin_events_tab":
self.QLabel = QtWidgets.QLabel(self.admin_events_tab)
elif container == "maps_tab":
self.QLabel = QtWidgets.QLabel(self.maps_tab)
elif container == "admin_statistics_tab":
self.QLabel = QtWidgets.QLabel(self.admin_statistics_tab)
elif container == "admin_student_view_tab":
self.QLabel = QtWidgets.QLabel(self.admin_student_view_tab)
elif container == "admin_statistics_tab":
self.QLabel = QtWidgets.QLabel(self.admin_statistics_tab)
elif container == "rand":
self.QLabel = QtWidgets.QLabel(self.rand_win_gb)
elif container == "top":
self.QLabel = QtWidgets.QLabel(self.top_win_gb)
elif container == "admin_output_report_frame":
self.QLabel = QtWidgets.QLabel(self.admin_output_report_frame)
elif container == "admin_student_support_tab":
self.QLabel = QtWidgets.QLabel(self.admin_student_support_tab)
elif container == "create_rewards_frame":
self.QLabel = QtWidgets.QLabel(self.create_rewards_frame)
elif container == "admin_account_frame":
self.QLabel = QtWidgets.QLabel(self.admin_account_frame)
self.QLabel.setWordWrap(True)
self.QLabel.setObjectName(object_name)
self.QLabel.setText(text)
# Geometry of QLabel is specified by the passed function parameters
self.QLabel.setGeometry(QtCore.QRect(x_coordinate, y_coordinate, width, length))
return self.QLabel
class create_QLineEdit():
def __init__(self, container, object_name, read_only, x_coordinate, y_coordinate, width, length):
# Creates and associates QLabel to specified container
if container == "login_widget_container":
self.QLineEdit = QtWidgets.QLineEdit(self.login_widget_container)
elif container == "dashboard_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.dashboard_tab)
elif container == "admin_dashboard_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.admin_dashboard_tab)
elif container == "upcoming_events_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.upcoming_events_tab)
elif container == "points_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.points_tab)
elif container == "rewards_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.rewards_tab)
elif container == "student_profile_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.student_profile_tab)
# Administrator
elif container == "admin_dashboard_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.admin_dashboard_tab)
elif container == "admin_events_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.admin_events_tab)
elif container == "maps_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.maps_tab)
elif container == "admin_statistics_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.admin_statistics_tab)
elif container == "admin_student_view_tab":
self.QLineEdit = QtWidgets.QLineEdit(self.admin_student_view_tab)
self.QLineEdit.setObjectName(object_name)
# user cannot type in the boxes
self.QLineEdit.setReadOnly(read_only)
# Geometry of QLineEdit is specified by the passed function parameters
self.QLineEdit.setFixedSize(width, length)
self.QLineEdit.move(x_coordinate, y_coordinate)
return self.QLineEdit
class create_QTextEdit():
def __init__(self, container, object_name, read_only, x_coordinate, y_coordinate, width, length):
# Creates and associates QLabel to specified container
if container == "login_widget_container":
self.QTextEdit = QtWidgets.QTextEdit(self.login_widget_container)
elif container == "dashboard_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.dashboard_tab)
elif container == "admin_dashboard_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.admin_dashboard_tab)
elif container == "upcoming_events_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.upcoming_events_tab)
elif container == "points_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.points_tab)
elif container == "rewards_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.rewards_tab)
elif container == "student_profile_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.student_profile_tab)
# Administrator
elif container == "admin_dashboard_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.admin_dashboard_tab)
elif container == "admin_events_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.admin_events_tab)
elif container == "maps_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.maps_tab)
elif container == "admin_statistics_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.admin_statistics_tab)
elif container == "admin_student_view_tab":
self.QTextEdit = QtWidgets.QTextEdit(self.admin_student_view_tab)
self.QTextEdit.setObjectName(object_name)
# user cannot type in the boxes
self.QTextEdit.setReadOnly(read_only)
# Geometry of QLineEdit is specified by the passed function parameters
self.QTextEdit.setFixedSize(width, length)
self.QTextEdit.move(x_coordinate, y_coordinate)
self.QTextEdit.setWordWrapMode(True)
return self.QTextEdit
class create_QScrollArea():
def __init__(self, container, object_name, layout, x_coordinate, y_coordinate, fixed_width, min_length):
self.scrollArea_object_container = QtWidgets.QWidget()
if container == "upcoming_events_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.upcoming_events_tab)
elif container == "dashboard_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.dashboard_tab)
elif container == "maps_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.maps_tab)
elif container == "points_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.points_tab)
elif container == "rewards_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.rewards_tab)
elif container == "admin_statistics_tab":
self.QScrollArea = QtWidgets.QScrollArea(self.admin_statistics_tab)
elif container == "report_frame":
self.QScrollArea = QtWidgets.QScrollArea(self.report_frame)
self.QScrollArea.setFixedWidth(fixed_width)
self.QScrollArea.setFixedHeight(min_length)
self.QScrollArea.move(x_coordinate, y_coordinate)
self.QScrollArea.setWidgetResizable(True)
self.QScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
if layout == "vertical_layout":
self.scroll_vertical_layout = QtWidgets.QVBoxLayout(self.scrollArea_object_container)
self.scrollArea_object_container.setLayout(self.scroll_vertical_layout)
return [self.scrollArea_object_container, self.scroll_vertical_layout, self.QScrollArea]
elif layout == "grid_layout":
self.scroll_grid_layout = QtWidgets.QGridLayout(self.scrollArea_object_container)
self.scrollArea_object_container.setLayout(self.scroll_grid_layout)
self.QScrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
return [self.scrollArea_object_container, self.scroll_grid_layout, self.QScrollArea]
class create_QFrame():
def __init__(self, container, object_name, orientation, x_coordinate, y_coordinate, width, length):
if container == "login_widget_container":
self.QFrame = QtWidgets.QFrame(self.login_widget_container)
elif container == "dashboard_tab":
self.QFrame = QtWidgets.QFrame(self.dashboard_tab)
elif container == "admin_dashboard_tab":
self.QFrame = QtWidgets.QFrame(self.admin_dashboard_tab)
elif container == "upcoming_events_tab":
self.QFrame = QtWidgets.QFrame(self.upcoming_events_tab)
elif container == "points_tab":
self.QFrame = QtWidgets.QFrame(self.points_tab)
elif container == "rewards_tab":
self.QFrame = QtWidgets.QFrame(self.rewards_tab)
elif container == "student_profile_tab":
self.QFrame = QtWidgets.QFrame(self.student_profile_tab)
elif container == "report_frame":
self.QFrame = QtWidgets.QFrame(self.report_frame)
elif container == "forgot_password_frame":
self.QFrame = QtWidgets.QFrame(self.forgot_password_frame)
elif container == "student_account_frame":
self.QFrame = QtWidgets.QFrame(self.student_account_frame)
# Administrator
elif container == "admin_dashboard_tab":
self.QFrame = QtWidgets.QFrame(self.admin_dashboard_tab)
elif container == "admin_events_tab":
self.QFrame = QtWidgets.QFrame(self.admin_events_tab)
elif container == "maps_tab":
self.QFrame = QtWidgets.QFrame(self.maps_tab)
elif container == "admin_statistics_tab":
self.QFrame = QtWidgets.QFrame(self.admin_statistics_tab)
elif container == "admin_student_view_tab":
self.QFrame = QtWidgets.QFrame(self.admin_student_view_tab)
elif container == "admin_output_report_frame":
self.QFrame = QtWidgets.QFrame(self.admin_output_report_frame)
elif container == "admin_student_support_tab":
self.QFrame = QtWidgets.QFrame(self.admin_student_support_tab)
elif container == "create_rewards_frame":
self.QFrame = QtWidgets.QFrame(self.create_rewards_frame)
elif container == "admin_account_frame":
self.QFrame = QtWidgets.QFrame(self.admin_account_frame)
self.QFrame.setObjectName(object_name)
self.QFrame.setGeometry(QtCore.QRect(x_coordinate, y_coordinate, width, length))
if orientation == "VLine":
self.QFrame.setFrameShape(QtWidgets.QFrame.VLine)
else:
self.QFrame.setFrameShape(QtWidgets.QFrame.HLine)
class create_QPushButton():
def __init__(self, container, object_name, text, icon, x_coordinate, y_coordinate, width, length):
# Creates and associates QLabel to specified container
if container == "login_widget_container":
self.QPushButton = QtWidgets.QPushButton(self.login_widget_container)
elif container == "central_widget":
self.QPushButton = QtWidgets.QPushButton(self.central_widget)
elif container == "student_profile_tab":
self.QPushButton = QtWidgets.QPushButton(self.student_profile_tab)
elif container == "rewards_tab":
self.QPushButton = QtWidgets.QPushButton(self.rewards_tab)
elif container == "admin_statistics_tab":
self.QPushButton = QtWidgets.QPushButton(self.admin_statistics_tab)
self.QPushButton.setObjectName(object_name)
if text != "None":
self.QPushButton.setText(text)
if icon != "None":
self.QPushButton.setIcon(QIcon(icon))
# Geometry of QLineEdit is specified by the passed function parameters
self.QPushButton.setFixedSize(width, length)
self.QPushButton.move(x_coordinate, y_coordinate)
return self.QPushButton
class create_horizontal_QSlider():
def __init__(self, container, x_coordinate, y_coordinate, width, length):
if container == "dashboard_tab":
self.QSlider = QtWidgets.QSlider(Qt.Horizontal, self.dashboard_tab)
self.QSlider.setGeometry(x_coordinate, y_coordinate, width, length)
return self.QSlider
class TabBar(QTabBar):
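# Tab bar that reports transposed tab sizes and rotates labels so tabs read vertically when docked on the West side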
def tabSizeHint(self, index):
self.setGeometry(0, 120, 180, 700)
s = QTabBar.tabSizeHint(self, index)
s.transpose()
return s
def paintEvent(self, event):
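# Draw each tab shape as usual, then rotate the label text 90 degrees around the tab centre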
painter = QStylePainter(self)
opt = QStyleOptionTab()
for i in range(self.count()):
self.initStyleOption(opt, i)
painter.drawControl(QStyle.CE_TabBarTabShape, opt)
painter.save()
s = opt.rect.size()
s.transpose()
r = QtCore.QRect(QtCore.QPoint(), s)
r.moveCenter(opt.rect.center())
opt.rect = r
c = self.tabRect(i).center()
painter.translate(c)
painter.rotate(90)
painter.translate(-c)
painter.drawControl(QStyle.CE_TabBarTabLabel, opt)
painter.restore()
class VerticalTabWidget(QTabWidget):
def __init__(self, *args, **kwargs):
QTabWidget.__init__(self, *args, **kwargs)
self.setTabBar(TabBar())
self.setTabPosition(QtWidgets.QTabWidget.West)
self.setStyleSheet("""
QTabBar::tab {
height: 180px;
width: 50px;
background-color: #202020;
color: white;
font-size:10pt;
}
"""
)
#self.setStyleSheet("QTabBar::tab {width: 50px;}")
class ChatGPTWindowWidget(QWidget):
def __init__(self):
super().__init__()
self.setFixedHeight(570)
self.init_ui()
def init_ui(self):
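# Chat layout: a scrollable message list above a single-line prompt box with a trailing send action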
layout = QVBoxLayout()
shadow1 = QGraphicsDropShadowEffect()
shadow1.setBlurRadius(20)
self.list_widget = QListWidget()
self.list_widget.setGraphicsEffect(shadow1)
self.line_edit = QLineEdit()
self.line_edit.setPlaceholderText("Ask Me Anything...")
self.line_edit.setFixedHeight(30)
self.line_edit.setStyleSheet("border-radius:5px; font-size: 10pt; border: 1px solid gray")
# Disable the QLineEdit's built-in clear button (a send action is added at the trailing position instead)
self.line_edit.setClearButtonEnabled(False)
# Create an icon for the button
icon = QIcon("ChatGPT Icons/send.svg")
# Create an action with the icon
action = self.line_edit.addAction(icon, QLineEdit.TrailingPosition)
# Connect a slot to the triggered signal of the action
action.triggered.connect(self.send_prompt)
# creating a QGraphicsDropShadowEffect object
shadow = QGraphicsDropShadowEffect()
shadow.setBlurRadius(10)
self.line_edit.returnPressed.connect(self.send_prompt)
self.line_edit.setGraphicsEffect(shadow)
layout.addWidget(self.list_widget)
layout.addWidget(self.line_edit)
self.setLayout(layout)
#self.add_prompt_widget("Hello")
def send_prompt(self):
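# Show the user's message immediately, then hand the API request to a worker thread so the UI stays responsive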
text = self.line_edit.text()
# Fetch user prompt
self.add_prompt_widget(text)
# Stop thread if already running
try:
self.request_thread.exit()
except:
pass
# Create and start new thread
self.request_thread = RequestThread()
self.request_thread.prompt = text
self.request_thread.response_signal.connect(self.add_response_widget)
self.request_thread.start()
self.line_edit.clear()
def add_prompt_widget(self, text):
list_item = QListWidgetItem()
# Create a chat bubble widget holding the user's prompt text
prompt_widget = ChatGPTPromptWidget(text)
# Size the list item to fit the widget, then attach the widget to the list
list_item.setSizeHint(prompt_widget.sizeHint())
self.list_widget.addItem(list_item)
self.list_widget.setItemWidget(list_item, prompt_widget)
def add_response_widget(self, text):
list_item = QListWidgetItem()
# Create a chat bubble widget holding the model's response text
prompt_widget = ChatGPTResponseWidget(text)
# Size the list item to fit the widget, then attach the widget to the list
list_item.setSizeHint(prompt_widget.sizeHint())
self.list_widget.addItem(list_item)
self.list_widget.setItemWidget(list_item, prompt_widget)
class ChatGPTPromptWidget(QWidget):
def __init__(self, text):
super().__init__()
self.text = text
self.initUI()
def initUI(self):
# Create the main horizontal layout
layout = QHBoxLayout(self)
vbox_layout = QVBoxLayout()
# Create an image label and add it to the layout
image_label = QLabel(self)
image_label.setStyleSheet("background-color:None")
pixmap = QPixmap(r'ChatGPT Icons/user.png') # Provide the path to your image file
image_label.setPixmap(pixmap.scaledToHeight(50))
image_label.setStyleSheet("padding-top:0px")
spacer = QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
vbox_layout.addWidget(image_label)
vbox_layout.addItem(spacer)
layout.addLayout(vbox_layout)
self.text_widget = QLabel(self)
self.text_widget.setAlignment(Qt.AlignLeft)
self.text_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.text_widget.setWordWrap(True)
self.text_widget.setStyleSheet("font-size: 11pt")
layout.addWidget(self.text_widget)
# Set the main layout for the widget
self.setLayout(layout)
self.text_widget.setText(self.text)
class ChatGPTResponseWidget(QWidget):
def __init__(self, text):
super().__init__()
self.text = text
self.initUI()
def initUI(self):
# Create the main horizontal layout
layout = QHBoxLayout(self)
vbox_layout = QVBoxLayout()
# Create an image label and add it to the layout
image_label = QLabel(self)
pixmap = QPixmap(r'ChatGPT Icons/chatbot.png') # Provide the path to your image file
image_label.setPixmap(pixmap.scaledToHeight(50))
image_label.setStyleSheet("padding-top:0px")
spacer = QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
vbox_layout.addWidget(image_label)
vbox_layout.addItem(spacer)
layout.addLayout(vbox_layout)
self.text_widget = QLabel(self)
self.text_widget.setAlignment(Qt.AlignLeft)
self.text_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.text_widget.setWordWrap(True)
self.text_widget.setStyleSheet("font-size: 11pt")
layout.addWidget(self.text_widget)
# Set the main layout for the widget
self.setLayout(layout)
self.text_widget.setText(self.text)
# Separate thread to make requests to OpenAI and wait for responses
class RequestThread(QThread):
prompt = ""
response_signal = pyqtSignal(str)
def run(self):
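# Runs on the worker thread: send the stored prompt to the chat completion endpoint and emit the reply via response_signal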
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a chatbot"},
{"role": "user", "content": self.prompt},
]
)
result = ''
for choice in response.choices:
result += choice.message.content
# Send results back to main thread
self.response_signal.emit(result) | [
"You are a chatbot"
] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_bj.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# Classification evaluation: map free-form model outputs to class labels via GPT/BERT text embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
dataset = 'bj'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['bj_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 4
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
#print('trim the length')
#print(inputs['input_ids'].shape)
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = openai.Embedding.create(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
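# Plot a confusion matrix over the class names and save it as an image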
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_bj.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict.keys())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['pred'].split(':')[-2].split('.')[0][1:].split(';'))
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(':')[-1][1:])
print(eval_data[0]['ref'].split(':')[-1])
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='max'):
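# Map free-form model output to a class index via cosine similarity of text embeddings;
# 'max' keeps the best similarity per class across predictions, 'accu' sums them; embeddings are cached to avoid repeated API calls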
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
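# Parse the prediction: classification outputs are split on ';', captions are used as-is; prefix with 'sound of ' to match the label embedding phrasing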
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1][1:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1].replace('\xad', "").replace(',', "")
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
print(cur_truth_idx)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('bj accuracy: ', acc, sk_acc)
report = classification_report(all_truth, all_pred, target_names=list(label_dict.keys()))
with open(save_fold + "/cla_summary.txt", "w") as f:
f.write(report)
gen_cm(all_truth.argmax(axis=1), all_pred.argmax(axis=1), save_fold + "/cm.png")
except:
pass | [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_jointas.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
# joint audio and speech
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# System prompt: generate open-ended QA pairs that relate the spoken content and the background sounds
sys_prompt = """Based on the following recording containing speech and background sounds, generate 10 different types of complex open-ended questions related to both spoken text and background sounds that require step-by-step thinking, and corresponding answers.
Do not ask questions that cannot be determined or are unclear based on given recording. Answers need to be at least 10 words and does not contain "unclear", "cannot be determine", etc.
Questions should be e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
def generate_prompt(entry):
if 'Singing' not in entry['audio_tag']:
prompt = """In the recording, background sound of {:s}, and speech of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
else:
prompt = """In the recording, background sound of {:s} and singing of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
return prompt
def remove_symbols(string):
symbols = ',"}{'
return string.strip(symbols).strip()
def extract_parts(string):
start_index = string.find('"q"') + 4
end_index = string.find('"a"')
part1 = string[start_index:end_index].strip()
start_index = end_index + 4
part2 = string[start_index:].strip()
return {"q": remove_symbols(part1), "a": remove_symbols(part2)}
def decode_output(gpt_out):
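# Extract every {...} span from the raw completion and parse it into a {"q": ..., "a": ...} dict, falling back to manual extraction when json.loads fails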
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = []
for x in gpt_out:
try:
qa_list.append(json.loads(x))
except Exception as e:
try:
qa_list.append(extract_parts(x))
except:
print(x)
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(50))
def complete_with_backoff(**kwargs):
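# Retry the chat completion call with randomized exponential backoff (handles rate limits and transient API errors)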
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
#print(prompt_pair)
audio_id, cur_text, prompt = prompt_pair[0], prompt_pair[1], prompt_pair[2]
print(cur_text)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
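# Rough usage tracking: approximate tokens as characters / 4 for both completion and prompt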
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['input'] = cur_text
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except Exception as e:
print(e)
with open(output_file, 'w') as f:
print(len(all_outputs))
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/joint_audioset/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['audio_id'], data[i]['text'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/joint_audioset_out/joint_as_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin)
"Based on the following recording containing speech and background sounds, generate 10 different types of complex open-ended questions related to both spoken text and background sounds that require step-by-step thinking, and corresponding answers. \nDo not ask questions that cannot be determined or are unclear based on given recording. Answers need to be at least 10 words and does not contain \"unclear\", \"cannot be determine\", etc.\nQuestions should be e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc.\nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.",
"PLACEHOLDER\" {text}\"",
"In the recording, background sound of {:s} and singing of \"{:s}\" is heard. ",
"In the recording, background sound of {:s}, and speech of \"{:s}\" is heard. ",
", "
] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_fma.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# System prompt: generate open-ended QA pairs about the given music (genre, lyrics, title)
sys_prompt = """Based on the following music, generate 5 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers.
Only ask questions about the given music. No general/background questions. Do not use the given information in the question.
Ask short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.
Only ask questions that have a clear answer based on the given music. The questions need to be of different types.
Questions can be e.g., How to describe the music; What can be inferred from the music and why; What is the genre of the music and why; What can be inferred from the melody and lyrics; What is the potential scenario that the musci can be played; If the music is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; what mood the music conveys based on lyrics and melody; what can be the potential title of the music, etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
def generate_prompt(entry):
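# Build the per-song context passed to the model: genre list, lyrics, and the suggested title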
prompt = """Music genre: {:s}; Music Lyrics: "{:s}"; Suggest Music Title: "{:s}" """
prompt = prompt.format(', '.join(entry['genre']), entry['text'], entry['title'])
return prompt
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
print(prompt_pair)
audio_id, prompt = prompt_pair[0], prompt_pair[1]
print(sys_prompt + '\n' + prompt)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
try:
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
except:
pass
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except:
pass
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/fma/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['audio_id'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/fma_out/fma_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin)
"Music genre: {:s}; Music Lyrics: \"{:s}\"; Suggest Music Title: \"{:s}\" ",
"PLACEHOLDER\" {text}\"",
"genre",
", ",
"Based on the following music, generate 5 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers. \nOnly ask questions about the given music. No general/background questions. Do not use the given information in the question. \nAsk short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.\nOnly ask questions that have a clear answer based on the given music. The questions need to be of different types.\nQuestions can be e.g., How to describe the music; What can be inferred from the music and why; What is the genre of the music and why; What can be inferred from the melody and lyrics; What is the potential scenario that the musci can be played; If the music is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; what mood the music conveys based on lyrics and melody; what can be the potential title of the music, etc. \nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation."
] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_dcase.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# Classification evaluation: map free-form model outputs to class labels via GPT/BERT text embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report, f1_score
from stats import calculate_stats
dataset = 'dcase'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['dcase_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 17
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
#print('trim the length')
#print(inputs['input_ids'].shape)
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = openai.Embedding.create(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_dcase.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict)
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['pred'].split(':')[-2].split('.')[0][1:].split(';'))
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(':')[-1][1:])
print(eval_data[0]['ref'].split(':')[-1])
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='max'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1][1:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1].replace(',', '')
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
print(cur_truth_idx)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
macro_f1 = f1_score(all_truth, all_pred, average='macro')
micro_f1 = f1_score(all_truth, all_pred, average='micro')
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc, macro_f1, micro_f1], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('dcase f1 macro micro: ', macro_f1, micro_f1)
report = classification_report(all_truth, all_pred, target_names=list(label_dict.keys()))
with open(save_fold + "/cla_summary.txt", "w") as f:
f.write(report)
gen_cm(all_truth.argmax(axis=1), all_pred.argmax(axis=1), save_fold + "/cm.png")
except:
pass
| [] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_as.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# Classification evaluation: map free-form model outputs to class labels via GPT/BERT text embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
import collections
import csv
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
dataset = 'as'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['as_formal_audio_lora_mix_no_corr_cont_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
num_class = 527
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
def make_name_dict(label_csv):
name_lookup = collections.OrderedDict()
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
name_lookup[row['mid']] = row['display_name']
line_count += 1
return name_lookup
label_csv = '/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_as.csv'
ori_label_dict = make_name_dict(label_csv)
print(ori_label_dict)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for key in ori_label_dict.keys():
class_name = ori_label_dict[key]
print(class_name)
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict.keys())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(': ')[-1].split('; '))
print(eval_data[0]['pred'].replace('"', '').split('Audio caption')[-1][2:])
print(eval_data[0]['ref'].split(': ')[-1].split('; '))
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='accu'):
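# Multi-label AudioSet: return the full per-class similarity score vector instead of a single argmax prediction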
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
return score
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split(': ')[-1].split('; ')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].replace('"', '').split('Audio caption')[-1][2:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth_list = eval_data[i]['ref'].split(': ')[-1].split('; ')
for cur_truth in cur_truth_list:
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
all_truth[i, cur_truth_idx] = 1.0
all_pred[i] = get_pred(cur_pred_list, label_dict)
if i% 100 == 0:
print('{:d} / {:d} processed'.format(i, num_sample))
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
print('AudioSet mAP: ', mAP)
except:
pass | [] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_vs.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# Classification evaluation: map free-form model outputs to class labels via GPT/BERT text embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
dataset = 'vs'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['vs_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 6
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
#print('trim the length')
#print(inputs['input_ids'].shape)
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = openai.Embedding.create(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_vs.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['pred'].split(':')[-2].split('.')[0][1:].split(';'))
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(':')[-1][1:])
print(eval_data[0]['ref'].split(':')[-1])
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='max'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1][1:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1]
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
print(cur_truth_idx)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('vocal sound accuracy: ', acc, sk_acc)
report = classification_report(all_truth, all_pred, target_names=list(label_dict.keys()))
with open(save_fold + "/cla_summary.txt", "w") as f:
f.write(report)
gen_cm(all_truth.argmax(axis=1), all_pred.argmax(axis=1), save_fold + "/cm.png")
except:
pass
| [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_jointas_v2.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
# joint audio and speech
import openai
import random
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# Stage-1 system prompt: summarize what the spoken text and background sounds jointly imply; the stage-2 prompt below turns that into QA pairs
sys_prompt = """Based on the following recording containing spoken text and background sounds, think of what can be inferred from the spoken text and background sounds together with a short paragraph for about 30 words,
e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc."""
sys_prompt2 = """Based on the following recording containing speech and background sounds, generate 10 different types of complex open-ended questions related to both (important!) spoken text and background sounds that require step-by-step thinking, and corresponding answers.
Do not ask questions that cannot be determined or are unclear based on given recording. Answers need to be at least 10 words and does not contain "unclear", "cannot be determine", etc.
Questions should be e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
base_template = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/question_temp/open_jointas.txt', delimiter='\t', dtype=str)
print(base_template.shape)
print(base_template[0])
def generate_prompt(entry):
if 'Singing' not in entry['audio_tag']:
prompt = """In the recording, background sound of {:s}, and speech of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
else:
prompt = """In the recording, background sound of {:s} and singing of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
print(prompt)
return prompt
def generate_prompt_2(entry, sum):
if 'Singing' not in entry['audio_tag']:
prompt = """In the recording, background sound of {:s}, and speech of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
else:
prompt = """In the recording, background sound of {:s} and singing of "{:s}" is heard. """
prompt = prompt.format(', '.join(entry['audio_tag']), entry['text'])
prompt = prompt + sum
return prompt
def remove_symbols(string):
symbols = ',"}{'
return string.strip(symbols).strip()
def extract_parts(string):
start_index = string.find('"q"') + 4
end_index = string.find('"a"')
part1 = string[start_index:end_index].strip()
start_index = end_index + 4
part2 = string[start_index:].strip()
return {"q": remove_symbols(part1), "a": remove_symbols(part2)}
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = []
for x in gpt_out:
try:
qa_list.append(json.loads(x))
except Exception as e:
try:
qa_list.append(extract_parts(x))
except:
print(x)
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(50))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
audio_id, cur_text, prompt = prompt_pair[0], prompt_pair[1], prompt_pair[2]
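# Stage 1: ask the model for a short (~30 word) summary of what the speech and background sounds jointly imply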
prompt = generate_prompt(prompt_pair[2])
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
print(cur_completion)
cur_prompt_outputs = cur_completion
cur_base_instruction = base_template[random.randint(0, base_template.shape[0] - 1)]
new_entry = {}
new_entry['audio_id'] = audio_id
new_entry['input'] = cur_text
new_entry['instruction'] = cur_base_instruction
new_entry['output'] = cur_completion
new_entry['task'] = 'joint audio and speech summarization'
all_outputs.append(new_entry)
prompt2 = generate_prompt_2(prompt_pair[2], cur_prompt_outputs)
print(prompt2)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt2},
{"role": "user", "content": prompt2 + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion) / 4 + len(prompt) / 4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
new_entry = {}
new_entry['audio_id'] = audio_id
new_entry['input'] = cur_text
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
new_entry['task'] = 'open-ended joint audio and speech qa'
all_outputs.append(new_entry)
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4] + '_raw.json')
except Exception as e:
print(e)
except Exception as e:
print(e)
with open(output_file, 'w') as f:
print(len(all_outputs))
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/joint_audioset/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['audio_id'], data[i]['text'], data[i]] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/joint_audioset_out_imp_after_submission/joint_as_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"Based on the following recording containing speech and background sounds, generate 10 different types of complex open-ended questions related to both (important!) spoken text and background sounds that require step-by-step thinking, and corresponding answers. \nDo not ask questions that cannot be determined or are unclear based on given recording. Answers need to be at least 10 words and does not contain \"unclear\", \"cannot be determine\", etc.\nQuestions should be e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc.\nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.",
"Based on the following recording containing spoken text and background sounds, think of what can be inferred from the spoken text and background sounds together with a short paragraph for about 30 words,\n e.g., Why the background sound and speech appear together? How speech content and background sounds related and why? What can be inferred from the speech content and background sounds together? How do the speaker react to the background sounds and why? In which scenario such speech content and background sounds would appear together and why? etc.",
"PLACEHOLDER\" {text}\"",
"In the recording, background sound of {:s} and singing of \"{:s}\" is heard. ",
"\t",
"In the recording, background sound of {:s}, and speech of \"{:s}\" is heard. ",
", ",
"/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/question_temp/open_jointas.txt"
] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_mosei.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# by combining speech content, fundamental frequency, speed, and volume information
sys_prompt = """Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers.
Only ask questions about the given speech emotion. No general/background questions. Do not use the given information in the question.
Ask short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.
Only ask questions that have a clear answer based on the given speech. The questions need to be of different types.
Questions can be e.g., What's the emotion of the speaker; How emotion is inferred from the speech content, f0, speed, and energy; What can be inferred from speech content and emotion and why; How speech content is related to the emotion and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special and why; what mood the speech conveys, etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
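# Format the per-utterance metadata (transcript, gender, F0 with population- and gender-relative
# pitch rank, volume, speed, emotion label, and the -3..3 sentiment score) into the GPT user prompt.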
def generate_prompt(entry):
prompt = """Speech content: "{:s}"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}; Speech emotion: {:s}. On a scale ranging from highly negative (-3) to highly positive (3), the emotional rating of this speech is {:s}, which is {:s}."""
emotion_score_dict = {-3: 'highly negative', -2: 'negative', -1: 'weakly negative', 0: 'neutral', 1: 'weakly positive', 2: 'positive', 3: 'highly positive'}
def gender_f0_rank(pitch, gender):
if gender == 'male':
f0_percentiles = [95, 120, 135, 180]
elif gender == 'female':
f0_percentiles = [160, 200, 220, 270]
if pitch < f0_percentiles[0]:
pitch_gender_rank = 'very low (<{:d} Hz)'.format(f0_percentiles[0])
elif pitch < f0_percentiles[1]:
pitch_gender_rank = 'relatively low ({:d}-{:d} Hz)'.format(f0_percentiles[0], f0_percentiles[1])
elif pitch < f0_percentiles[2]:
pitch_gender_rank = 'medium ({:d}-{:d} Hz)'.format(f0_percentiles[1], f0_percentiles[2])
elif pitch < f0_percentiles[3]:
pitch_gender_rank = 'relatively high ({:d}-{:d} Hz)'.format(f0_percentiles[2], f0_percentiles[3])
else:
            pitch_gender_rank = 'very high (>{:d} Hz)'.format(f0_percentiles[3])
return pitch_gender_rank
prompt = prompt.format(entry['text'], entry['gender'], entry['pitch_detail'], entry['pitch'], gender_f0_rank(entry['pitch_detail'], entry['gender']), entry['gender'], entry['energy'], entry['speed'], entry['emotion'], entry['sentiment_score'], emotion_score_dict[int(entry['sentiment_score'])])
return prompt
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
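# Single-stage generation: one chat call per utterance, decode the returned QA pairs,
# and checkpoint the accumulated outputs every 10 entries.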
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
print(prompt_pair)
audio_id, prompt = prompt_pair[0], prompt_pair[1]
print(sys_prompt + '\n' + prompt)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
try:
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
except:
pass
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except:
pass
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/mosei_all/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['wav'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/mosei_all_output/mosei_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"gender",
"sentiment_score",
"energy",
"PLACEHOLDER\" {text}\"",
"Speech content: \"{:s}\"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}; Speech emotion: {:s}. On a scale ranging from highly negative (-3) to highly positive (3), the emotional rating of this speech is {:s}, which is {:s}.",
"emotion",
"Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers. \nOnly ask questions about the given speech emotion. No general/background questions. Do not use the given information in the question. \nAsk short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.\nOnly ask questions that have a clear answer based on the given speech. The questions need to be of different types.\nQuestions can be e.g., What's the emotion of the speaker; How emotion is inferred from the speech content, f0, speed, and energy; What can be inferred from speech content and emotion and why; How speech content is related to the emotion and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special and why; what mood the speech conveys, etc. \nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.",
"pitch_detail"
] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~eval~eval_gtzan_genre.py | # -*- coding: utf-8 -*-
# @Time : 7/13/23 5:21 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_fma_genre.py
import os.path
import datetime
current_time = datetime.datetime.now()
time_string = current_time.strftime("%Y%m%d%H%M%S")
import openai
import math
import json
import string
import torch
import numpy as np
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
from itertools import product
def get_combinations(a, b):
combinations = []
for x, y in product(a, b):
combinations.append(f"{x}_{y}")
return combinations
dataset = 'gtzan'
llm_task = 'caption'
text_embed_setting = 'gpt'
directory = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/raw/'
prefix = 'GTZAN'
files = os.listdir(directory)
eval_file_list = [os.path.join(directory, file) for file in files if file.startswith(prefix)]
eval_file_list.sort()
eval_file_list = eval_file_list[::-1]
print(eval_file_list)
eval_file_list = [
'GTZAN_formal_speech_all_open_close_final_checkpoint-35000_genre_fp16_joint_3',
'GTZAN_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_checkpoint-20000_genre_0.10_0.95_500'
]
eval_file_list = [directory + x + '.json' for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 10
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = ""
bert_model = ""
all_res = []
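# For each result file: embed the ten GTZAN genre labels once, embed every model answer,
# and assign the label with the highest cosine similarity.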
for eval_file in eval_file_list:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy().tolist()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
# TODO: change to your openai key
openai.api_key = 'your_openai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = ['Hip-Hop', 'Reggae', 'Rock', 'Country', 'Jazz', 'Blues', 'Pop', 'Disco', 'Metal', 'Classical']
label_dict = OrderedDict()
for i in range(len(label_list)):
class_name = label_list[i]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('Music Genre: ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('Music Genre: ' + class_name.replace('_', ' ').lower())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
#print(eval_data[0]['pred'].split(':')[-2].split('.')[0][1:].split(';'))
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(':')[-1].lstrip())
print(eval_data[0]['ref'].split(':')[-1].lstrip())
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='accu'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
#assert num_sample == 2000
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].lstrip()
cur_pred_list = ['Music Genre: ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1].lstrip()
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
if (cur_truth_idx==cur_pred_idx) == False:
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('gtzan accuracy: ', acc, sk_acc)
all_res.append([eval_file, acc])
np.savetxt('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/summary/summary_gtzan_{:s}.csv'.format(time_string), all_res, delimiter=',', fmt='%s') | [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_voxceleb.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# by combining speech content, fundamental frequency, speed, and volume information
sys_prompt = """Based on the following speech, generate 10 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers.
Only ask questions about the given speech. No general/background questions. Do not use the given information in the question.
Ask short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.
Only ask questions that have a clear answer based on the given speech. The questions need to be of different types.
Questions can be e.g., What is the estimated speaker age and why. What can be inferred from speech content, volume, speed, and speaker pitch and age and why; How speech content is related to speech volume, speed, and speaker pitch and age and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; What potential mood the speech conveys based speech content and other information; the potential topic of speech, etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
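# The VoxCeleb context adds estimated speaker age and a gender-relative F0 ranking
# on top of the transcript, volume, and speed information.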
def generate_prompt(entry):
prompt = """Speech content: "{:s}"; Estimated speaker age: {:s}; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}."""
def gender_f0_rank(pitch, gender):
if gender == 'male':
f0_percentiles = [95, 120, 135, 180]
elif gender == 'female':
f0_percentiles = [160, 200, 220, 270]
else:
f0_percentiles = [160, 200, 220, 270]
if pitch < f0_percentiles[0]:
pitch_gender_rank = 'very low (<{:d} Hz)'.format(f0_percentiles[0])
elif pitch < f0_percentiles[1]:
pitch_gender_rank = 'relatively low ({:d}-{:d} Hz)'.format(f0_percentiles[0], f0_percentiles[1])
elif pitch < f0_percentiles[2]:
pitch_gender_rank = 'medium ({:d}-{:d} Hz)'.format(f0_percentiles[1], f0_percentiles[2])
elif pitch < f0_percentiles[3]:
pitch_gender_rank = 'relatively high ({:d}-{:d} Hz)'.format(f0_percentiles[2], f0_percentiles[3])
else:
            pitch_gender_rank = 'very high (>{:d} Hz)'.format(f0_percentiles[3])
return pitch_gender_rank
prompt = prompt.format(entry['text'], entry['age'], entry['gender'], entry['pitch_detail'], entry['pitch'], gender_f0_rank(entry['pitch_detail'], entry['gender']), entry['gender'], entry['energy'], entry['speed'])
return prompt
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
print(prompt_pair)
audio_id, prompt = prompt_pair[0], prompt_pair[1]
print(sys_prompt + '\n' + prompt)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
try:
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
except:
pass
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except:
pass
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/voxceleb_train/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['wav'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/voxceleb_train_out/voxceleb_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"gender",
"Speech content: \"{:s}\"; Estimated speaker age: {:s}; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}.",
"energy",
"Based on the following speech, generate 10 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers. \nOnly ask questions about the given speech. No general/background questions. Do not use the given information in the question. \nAsk short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.\nOnly ask questions that have a clear answer based on the given speech. The questions need to be of different types.\nQuestions can be e.g., What is the estimated speaker age and why. What can be inferred from speech content, volume, speed, and speaker pitch and age and why; How speech content is related to speech volume, speed, and speaker pitch and age and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; What potential mood the speech conveys based speech content and other information; the potential topic of speech, etc. \nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.",
"PLACEHOLDER\" {text}\"",
"pitch_detail"
] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_tut.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# evaluation classification based on gpt/bert embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
dataset = 'tut'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['tut_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 15
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
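# Free-form predictions are matched to the 15 TUT scene labels via text embeddings:
# mean-pooled BERT hidden states or OpenAI text-embedding-ada-002, selected by text_embed_setting.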
for eval_file in eval_file_list:
#try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
#print('trim the length')
#print(inputs['input_ids'].shape)
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = openai.Embedding.create(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_tut.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict.keys())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['pred'].split(':')[-2].split('.')[0][1:].split(';'))
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].split(':')[-1][1:])
print(eval_data[0]['ref'].split(':')[-1])
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='max'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1][1:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1].replace('_',' ')
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
print(cur_truth_idx)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('tut accuracy: ', acc, sk_acc)
report = classification_report(all_truth, all_pred, target_names=list(label_dict.keys()))
with open(save_fold + "/cla_summary.txt", "w") as f:
f.write(report)
gen_cm(all_truth.argmax(axis=1), all_pred.argmax(axis=1), save_fold + "/cm.png")
# except:
# pass
| [] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_fsd50k.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# evaluation classification based on gpt/bert embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
import collections
import csv
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
dataset = 'fsd50k'
llm_task = 'caption'
text_embed_setting = 'gpt'
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = ['fsd_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 200
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
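# FSD50K is multi-label, so get_pred below returns the full cosine-similarity score vector over
# all 200 classes rather than an argmax, and mAP/mAUC are computed from those scores.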
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
def make_name_dict(label_csv):
name_lookup = collections.OrderedDict()
with open(label_csv, 'r') as f:
csv_reader = csv.DictReader(f)
line_count = 0
for row in csv_reader:
name_lookup[row['mid']] = row['display_name']
line_count += 1
return name_lookup
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/eval_sets/labels/class_labels_indices_fsd50k.csv', delimiter=',', dtype=str, skiprows=1)
print(label_list)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2].replace('_', ' ')
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict.keys())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[1])
print(eval_data[1]['audio_id'])
print(eval_data[1]['pred'].replace('"', '').split('Audio caption')[-1][2:])
print(eval_data[1]['pred'].split(': ')[-1].split('; '))
print(eval_data[1]['ref'].split(': ')[-1].split('; '))
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='accu'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
return score
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(': ')[-1].split('; ')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].replace('"', '').split('Audio caption')[-1][2:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth_list = eval_data[i]['ref'].split(': ')[-1].split('; ')
for cur_truth in cur_truth_list:
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
all_truth[i, cur_truth_idx] = 1.0
all_pred[i] = get_pred(cur_pred_list, label_dict)
if i % 100 == 0:
print('{:d} / {:d} processed'.format(i, num_sample))
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
except:
pass
| [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~eval~eval_esc.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_esc.py
# evaluation classification based on gpt/bert embeddings
import os.path
import datetime
current_time = datetime.datetime.now()
time_string = current_time.strftime("%Y%m%d%H%M%S")
import openai
import math
import json
import string
import torch
import numpy as np
from collections import OrderedDict
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
from itertools import product
def get_combinations(a, b):
combinations = []
for x, y in product(a, b):
combinations.append(f"{x}_{y}")
return combinations
dataset = 'esc50'
llm_task = 'caption'
text_embed_setting = 'gpt'
directory = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/raw/'
prefix = 'esc50'
files = os.listdir(directory)
eval_file_list = [os.path.join(directory, file) for file in files if file.startswith(prefix)]
eval_file_list.sort()
eval_file_list = eval_file_list[::-1]
print(eval_file_list)
eval_file_list = [
'esc50_formal_speech_all_open_close_final_checkpoint-35000_caption_fp16_joint_3',
'esc50_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_checkpoint-20000_caption_0.10_0.95_500'
]
eval_file_list = [directory + x + '.json' for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 50
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = ""
bert_model = ""
all_res = []
for eval_file in eval_file_list:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy().tolist()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
# TODO: change to your openai key
openai.api_key = 'your_openai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/whisper-a/egs/esc-50/data/esc_class_labels_indices.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='accu'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
assert num_sample == 2000
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].lstrip()
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1]
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
if (cur_truth_idx==cur_pred_idx) == False:
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('esc50 accuracy: ', acc, sk_acc)
all_res.append([eval_file, acc])
np.savetxt('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/summary/summary_esc50_{:s}.csv'.format(time_string), all_res, delimiter=',', fmt='%s') | [] |
2024-01-10 | YuanGongND/ltu | src~ltu~qa_generation~as_gpt_qa.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
sys_prompt = """
Based on the following audio clip, generate 10 different types of complex open-ended questions that require step-by-step thinking, and corresponding step-by-step answers.
The following information is provided: the sound events appear in the audio clip, together with its acoustic features, and corresponding onset and offset time stamps. A description of the content of the audio clip is also provided.
Questions should be about the audio, e.g., which sound event is recognized and why (e.g., based on its acoustic feature), what can be inferred based on the combination of sound events; the temporal relationship between the sound events and what can be inferred from that; the potential scenario that such an audio clip could happen, if the audio clip is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why, what mood or atmosphere this audio clip conveys, etc.
The more complex and diverse the question, the better.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation.
"""
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def generate_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
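# Note: despite its name, generate_prompt below runs the full generation loop: it sends each
# (audio_id, prompt) pair to the chat API, decodes the QA pairs, and checkpoints every 10 entries.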
def generate_prompt(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
if 'gpt-4' in model_engine:
openai.api_key = 'your_openai_key'
else:
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
audio_id, prompt = prompt_pair[0], prompt_pair[1]
response = generate_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt+'" {text}"'}],
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(sys_prompt)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
model_engine="gpt-3.5-turbo"
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data/all_info/datafiles/as_strong_eval_sample_600.json', 'r') as fp:
data = json.load(fp)
prompt_list = [(x['audio_id'], 'Sound Events: ' + x['temporal'] + '; Description: ' + x['caption']) for x in data]
begin = time.time()
generate_prompt(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data/all_info/datafiles/as_strong_eval_600_qa_{:s}.json'.format(model_engine), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"caption",
"; Description: ",
"\nBased on the following audio clip, generate 10 different types of complex open-ended questions that require step-by-step thinking, and corresponding step-by-step answers.\nThe following information is provided: the sound events appear in the audio clip, together with its acoustic features, and corresponding onset and offset time stamps. A description of the content of the audio clip is also provided. \nQuestions should be about the audio, e.g., which sound event is recognized and why (e.g., based on its acoustic feature), what can be inferred based on the combination of sound events; the temporal relationship between the sound events and what can be inferred from that; the potential scenario that such an audio clip could happen, if the audio clip is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why, what mood or atmosphere this audio clip conveys, etc. \nThe more complex and diverse the question, the better.\nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.\n",
"Sound Events: ",
"PLACEHOLDER\" {text}\""
] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_vggsound.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_llm_cla.py
# evaluation classification based on gpt/bert embeddings
import os.path
import openai
import numpy as np
import math
import json
import string
import torch
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
dataset = 'vgg'
llm_task = 'caption'
text_embed_setting = 'gpt'
# eval_file_list1 = ['/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/vgg_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_checkpoint-' + str(x) + '_{:s}_0.10_0.95_500_repp110.json'.format(llm_task) for x in range(16000, 24000, 2000)]
# eval_file_list2 = ['/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/vgg_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_1e-3_checkpoint-' + str(x) + '_{:s}_0.10_0.95_500_repp110.json'.format(llm_task) for x in range(16000, 24000, 2000)]
# eval_file_list = eval_file_list1 + eval_file_list2
base_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/'
eval_file_list = [#'vgg_formal_audio_lora_mix_from_proj_fz_no_adapter_checkpoint-22000_caption_0.10_0.95_500_repp110.json',
#'vgg_formal_audio_lora_close_from_e2e_cla_from_proj_checkpoint-6000_caption_0.10_0.95_500_repp110.json',
#'vgg_formal_audio_lora_mix_from_proj_fz_audio_encoder_checkpoint-22000_caption_0.10_0.95_500_repp110.json',
'vgg_formal_audio_lora_mix_no_corr_cont_checkpoint-22000_caption_0.10_0.95_500_repp110.json']
eval_file_list = [ base_path + x for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 309
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
for eval_file in eval_file_list:
try:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
#print(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
openai.api_key = 'your_open_ai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/cav-mae/pretrained_model/datafiles/vggsound/class_labels_indices_vgg.csv', delimiter=',', dtype=str, skiprows=1)
print(label_list)
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2].replace('_', ', ')
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ', ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ', ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
print(label_dict.keys())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['audio_id'])
print(eval_data[0]['pred'].replace('"', '').split('Audio caption')[-1][2:])
print(eval_data[0]['ref'].split(': ')[-1])
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
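        # Classification via embedding similarity: each generated text ("sound of ...") is
        # embedded and scored against all 309 VGGSound label embeddings by cosine similarity,
        # either accumulating or max-pooling the per-class scores across prediction texts.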
def get_pred(cur_pred_list, label_dict, mode='max'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
return score
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].replace('"', '').split('Audio caption')[-1][2:]
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(': ')[-1].lower()
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
cur_pred_score = get_pred(cur_pred_list, label_dict)
all_pred[i] = cur_pred_score
all_truth[i, cur_truth_idx] = 1.0
if i% 100 == 0:
print('{:d} / {:d} processed'.format(i, num_sample))
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/{:s}_{:s}_{:s}_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
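        # calculate_stats returns a list of per-class stats dicts; mAP and mAUC are the means of
        # their 'AP' and 'auc' fields, and overall accuracy is read from the first entry's 'acc'.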
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/alpaca-lora-main/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
print('vgg accuracy: ', acc)
except:
pass | [] |
2024-01-10 | YuanGongND/ltu | src~ltu~eval~eval_esc.py | # -*- coding: utf-8 -*-
# @Time : 4/10/23 5:05 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_esc.py
# evaluation classification based on gpt/bert embeddings
import os.path
import datetime
current_time = datetime.datetime.now()
time_string = current_time.strftime("%Y%m%d%H%M%S")
import openai
import math
import json
import string
import torch
import numpy as np
from collections import OrderedDict
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
from itertools import product
def get_combinations(a, b):
combinations = []
for x, y in product(a, b):
combinations.append(f"{x}_{y}")
return combinations
dataset = 'esc50'
llm_task = 'caption'
text_embed_setting = 'gpt'
directory = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/raw/'
prefix = 'esc50'
files = os.listdir(directory)
eval_file_list = [os.path.join(directory, file) for file in files if file.startswith(prefix)]
eval_file_list.sort()
eval_file_list = eval_file_list[::-1]
print(eval_file_list)
eval_file_list = [
'esc50_formal_speech_all_open_close_final_checkpoint-35000_caption_fp16_joint_3',
'esc50_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_checkpoint-20000_caption_0.10_0.95_500'
]
eval_file_list = [directory + x + '.json' for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 50
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = ""
bert_model = ""
all_res = []
for eval_file in eval_file_list:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy().tolist()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
# TODO: change to your openai key
openai.api_key = 'your_openai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/whisper-a/egs/esc-50/data/esc_class_labels_indices.csv', delimiter=',', dtype=str, skiprows=1)
# load cached label embedding dict
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting)):
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'r') as f:
json_str = f.read()
label_dict = json.loads(json_str, object_pairs_hook=OrderedDict)
else:
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
label_dict[class_name] = get_gpt_embedding('sound of ' + class_name.replace('_', ' ').lower())
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('sound of ' + class_name.replace('_', ' ').lower())
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/label_embed_dict/{:s}_{:s}.json'.format(dataset, text_embed_setting), 'w') as f:
json_str = json.dumps(label_dict)
f.write(json_str)
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
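    # Embeddings of previously-seen prediction strings are cached on disk so repeated
    # evaluations do not re-query the embedding endpoint for identical text.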
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format( dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
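    # ESC-50 prediction: embed each generated caption ("sound of ..."), score it against all
    # 50 class-name embeddings by cosine similarity, and take the argmax as the predicted class.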
def get_pred(cur_pred_list, label_dict, mode='accu'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
assert num_sample == 2000
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'cla':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].split('.')[0][1:].split(';')
cur_pred_list = ['sound of ' + x.lower().lstrip() for x in cur_pred_list]
elif llm_task == 'caption':
cur_pred_list = eval_data[i]['pred'].split(':')[-1].lstrip()
cur_pred_list = ['sound of ' + cur_pred_list.lower()]
cur_truth = eval_data[i]['ref'].split(':')[-1]
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
if (cur_truth_idx==cur_pred_idx) == False:
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
mAP = np.mean([stat['AP'] for stat in stats])
mAUC = np.mean([stat['auc'] for stat in stats])
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [mAP, mAUC, acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('esc50 accuracy: ', acc, sk_acc)
all_res.append([eval_file, acc])
np.savetxt('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/summary/summary_esc50_{:s}.csv'.format(time_string), all_res, delimiter=',', fmt='%s') | [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~eval~eval_iemocap_cla.py | # -*- coding: utf-8 -*-
# @Time : 6/30/23 3:43 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : eval_iemocap_cla.py
import os.path
import datetime
current_time = datetime.datetime.now()
time_string = current_time.strftime("%Y%m%d%H%M%S")
import openai
import math
import json
import string
import torch
import numpy as np
from collections import OrderedDict
from transformers import AutoTokenizer, BertModel
from sklearn.metrics import accuracy_score, classification_report
from stats import calculate_stats
from itertools import product
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
dataset = 'iemocap'
llm_task = 'emotion_cla'
text_embed_setting = 'gpt'
def get_combinations(a, b):
combinations = []
for x, y in product(a, b):
combinations.append(f"{x}_{y}")
return combinations
directory = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/raw/'
prefix = 'iemocap'
files = os.listdir(directory)
eval_file_list = [os.path.join(directory, file) for file in files if file.startswith(prefix)]
eval_file_list.sort()
eval_file_list = eval_file_list[::-1]
print(eval_file_list)
eval_file_list = [
'iemocap_formal_speech_all_open_close_final_checkpoint-35000_emotion_cla_fp16_joint_3',
'iemocap_formal_audio_lora_mix_from_close_from_e2e_cla_from_proj_checkpoint-20000_emotion_cla_0.10_0.95_500'
]
eval_file_list = [directory + x + '.json' for x in eval_file_list]
for x in eval_file_list:
assert os.path.exists(x) == True
num_class = 4
device = "cuda" if torch.cuda.is_available() else "cpu"
bert_mdl_size = 'bert-large-uncased'
bert_tokenizer = AutoTokenizer.from_pretrained(bert_mdl_size, model_max_length=512)
bert_model = BertModel.from_pretrained(bert_mdl_size).to(device)
all_res = []
for eval_file in eval_file_list:
def get_bert_embedding(input_text):
input_text = remove_punctuation_and_lowercase(input_text)
inputs = bert_tokenizer(input_text, return_tensors="pt")
if inputs['input_ids'].shape[1] > 512:
inputs['input_ids'] = inputs['input_ids'][:, :512]
inputs['token_type_ids'] = inputs['token_type_ids'][:, :512]
inputs['attention_mask'] = inputs['attention_mask'][:, :512]
outputs = bert_model(**inputs.to(device))
last_hidden_states = torch.mean(outputs.last_hidden_state[0], dim=0).cpu().detach().numpy().tolist()
return last_hidden_states
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def embedding_with_backoff(**kwargs):
return openai.Embedding.create(**kwargs)
def get_gpt_embedding(input_text, mdl_size='text-embedding-ada-002'):
# TODO: change to your openai key
openai.api_key = 'your_openai_key'
response = embedding_with_backoff(
input=input_text,
model=mdl_size
)
embeddings = response['data'][0]['embedding']
return embeddings
def cosine_similarity(vector1, vector2):
dot_product = sum(v1 * v2 for v1, v2 in zip(vector1, vector2))
magnitude1 = math.sqrt(sum(v1 ** 2 for v1 in vector1))
magnitude2 = math.sqrt(sum(v2 ** 2 for v2 in vector2))
return dot_product / (magnitude1 * magnitude2)
def remove_punctuation_and_lowercase(text):
text = text.translate(str.maketrans('', '', string.punctuation))
text = text.lower()
return text
def gen_cm(all_truth, all_pred, save_name):
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# list of label names
label_names = list(label_dict.keys())
# generate confusion matrix
cm = confusion_matrix(all_truth, all_pred)
# plot confusion matrix as a figure
plt.imshow(cm, cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(label_names))
plt.xticks(tick_marks, label_names, rotation=90, fontsize=6)
plt.yticks(tick_marks, label_names, fontsize=6)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
# add label values to the confusion matrix cells
for i in range(len(label_names)):
for j in range(len(label_names)):
plt.text(j, i, cm[i, j], ha="center", va="center", color="white")
plt.savefig(save_name, dpi=300)
label_list = np.loadtxt('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/iemocap/datafiles/class_labels_indices_iemocap_emotion_eval.csv', delimiter=',', dtype=str, skiprows=1)
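    # Build a text embedding for each of the 4 IEMOCAP emotion class names; model predictions
    # are later assigned to the class whose embedding is most cosine-similar.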
label_dict = OrderedDict()
for i in range(label_list.shape[0]):
class_code = label_list[i, 1]
class_name = label_list[i, 2][1:-1]
if text_embed_setting == 'gpt':
print('The speaker emotion is ' + class_name.replace('_', ' ').lower())
label_dict[class_name] = get_gpt_embedding(class_name.replace('_', ' ').lower()) # 'The speaker emotion is ' +
elif text_embed_setting == 'bert':
label_dict[class_name] = get_bert_embedding('' + class_name.replace('_', ' ').lower())
with open(eval_file, 'r') as fp:
eval_data = json.load(fp)
print(eval_data[0])
print(eval_data[0]['pred'].lstrip()) # .split(':')[-1].split('.')[0].lstrip())
print(eval_data[0]['ref'].split(':')[-1].split('.')[0].lstrip())
print(eval_data[0]['audio_id'])
print(label_dict.keys())
if os.path.exists('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)) == True:
with open('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting), 'r') as f:
embed_cache = f.read()
embed_cache = json.loads(embed_cache)
else:
embed_cache = {}
def get_pred(cur_pred_list, label_dict, mode='accu'):
# at beginning, all zero scores
score = np.zeros(num_class)
label_embed_list = list(label_dict.values())
# pred might not be a single text
for cur_pred in cur_pred_list:
if cur_pred in embed_cache:
cur_pred_embed = embed_cache[cur_pred]
else:
if text_embed_setting == 'gpt':
cur_pred_embed = get_gpt_embedding(cur_pred)
else:
cur_pred_embed = get_bert_embedding(cur_pred)
embed_cache[cur_pred] = cur_pred_embed
for i in range(num_class):
if mode == 'accu':
score[i] = score[i] + cosine_similarity(cur_pred_embed, label_embed_list[i])
elif mode == 'max':
score[i] = max(score[i], cosine_similarity(cur_pred_embed, label_embed_list[i]))
cur_pred = np.argmax(score)
return cur_pred
num_sample = len(eval_data)
print('number of samples {:d}'.format(num_sample))
all_pred = np.zeros([num_sample, num_class])
all_truth = np.zeros([num_sample, num_class])
for i in range(num_sample):
cur_audio_id = eval_data[i]['audio_id']
if llm_task == 'emotion_cla':
cur_pred_list = [eval_data[i]['pred'].lstrip()] #.split(':')[-1].split('.')[0].lstrip()
cur_truth = eval_data[i]['ref'].split(':')[-1].split('.')[0].lstrip()
cur_truth_idx = list(label_dict.keys()).index(cur_truth)
cur_pred_idx = get_pred(cur_pred_list, label_dict)
if (cur_truth_idx==cur_pred_idx) == False:
print('Truth: ', cur_truth_idx, list(label_dict.keys())[cur_truth_idx], 'Pred: ', cur_pred_idx, list(label_dict.keys())[cur_pred_idx], cur_truth_idx==cur_pred_idx, cur_pred_list)
all_pred[i, cur_pred_idx] = 1.0
all_truth[i, cur_truth_idx] = 1.0
save_fold = "/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/{:s}_{:s}_{:s}_cla_report".format('.'.join(eval_file.split('/')[-1].split('.')[:-1]), llm_task, text_embed_setting)
if os.path.exists(save_fold) == False:
os.makedirs(save_fold)
np.save(save_fold + '/all_pred.npy', all_pred)
np.save(save_fold + '/all_truth.npy', all_truth)
stats = calculate_stats(all_pred, all_truth)
acc = stats[0]['acc']
np.savetxt(save_fold + '/result_summary.csv', [acc], delimiter=',')
embed_cache = json.dumps(embed_cache)
save_cache_path = '/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/embedding_cache/{:s}_{:s}_{:s}.json'.format(dataset, llm_task, text_embed_setting)
with open(save_cache_path, 'w') as f:
f.write(embed_cache)
sk_acc = accuracy_score(all_truth, all_pred)
print('{:s} accuracy: '.format(dataset), acc, sk_acc)
report = classification_report(all_truth, all_pred)
text_file_path = save_fold + "/cla_report.txt"
with open(text_file_path, 'w') as text_file:
text_file.write(report)
print(report)
all_res.append([eval_file, acc])
np.savetxt('/data/sls/scratch/yuangong/audiollm/src/llm/ltu_e/eval_res/summary/summary_iemocap_emotion_{:s}.csv'.format(time_string), all_res, delimiter=',', fmt='%s') | [] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_libritts.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# by combining speech content, fundamental frequency, speed, and volume information
sys_prompt = """Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers.
Only ask questions about the given speech. No general/background questions. Do not use the given information in the question.
Ask short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.
Only ask questions that have a clear answer based on the given speech. The questions need to be of different types.
Questions can be e.g., What can be inferred from speech content, volume, speed, and speaker pitch and why; How speech content is related to speech volume, speed, and speaker pitch and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; what mood the speech conveys based speech content and other information, etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
def generate_prompt(entry):
prompt = """Speech content: "{:s}"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}."""
def gender_f0_rank(pitch, gender):
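        # Map the speaker's F0 to a qualitative rank ('very low' .. 'very high') using the
        # hard-coded, gender-specific cut-offs (in Hz) below.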
if gender == 'male':
f0_percentiles = [95, 120, 135, 180]
elif gender == 'female':
f0_percentiles = [160, 200, 220, 270]
if pitch < f0_percentiles[0]:
pitch_gender_rank = 'very low (<{:d} Hz)'.format(f0_percentiles[0])
elif pitch < f0_percentiles[1]:
pitch_gender_rank = 'relatively low ({:d}-{:d} Hz)'.format(f0_percentiles[0], f0_percentiles[1])
elif pitch < f0_percentiles[2]:
pitch_gender_rank = 'medium ({:d}-{:d} Hz)'.format(f0_percentiles[1], f0_percentiles[2])
elif pitch < f0_percentiles[3]:
pitch_gender_rank = 'relatively high ({:d}-{:d} Hz)'.format(f0_percentiles[2], f0_percentiles[3])
else:
            pitch_gender_rank = 'very high (>{:d} Hz)'.format(f0_percentiles[3])
return pitch_gender_rank
prompt = prompt.format(entry['text'], entry['gender'], entry['pitch_detail'], entry['pitch'], gender_f0_rank(entry['pitch_detail'], entry['gender']), entry['gender'], entry['energy'], entry['speed'])
return prompt
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
print(prompt_pair)
audio_id, prompt = prompt_pair[0], prompt_pair[1]
print(sys_prompt + '\n' + prompt)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
try:
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
except:
pass
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except:
pass
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
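# Each data split is served by a (model version, API key) pair chosen round-robin from the
# list below; the '_l' / '_g' suffix selects which OpenAI key generate_qa() uses, spreading
# load across engines and accounts when splits run in parallel.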
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/libritts_train/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['wav'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/libritts_train_out/libritts_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"gender",
"pitch_detail",
"energy",
"Speech content: \"{:s}\"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {:s} speaker; Speech volume: {:s}; Speech speed: {:s}.",
"PLACEHOLDER\" {text}\"",
"Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers. \nOnly ask questions about the given speech. No general/background questions. Do not use the given information in the question. \nAsk short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.\nOnly ask questions that have a clear answer based on the given speech. The questions need to be of different types.\nQuestions can be e.g., What can be inferred from speech content, volume, speed, and speaker pitch and why; How speech content is related to speech volume, speed, and speaker pitch and why; What is the intent and implicit meaning of the speech and why; What is the potential scenario that the speech could happen and why; If the speech is special (e.g., urgent, funny, interesting, abnormal, unique, etc) and why; what mood the speech conveys based speech content and other information, etc. \nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation."
] |
2024-01-10 | YuanGongND/ltu | src~ltu_as~qa_generation~gpt_qa_batch_iemocap.py | # -*- coding: utf-8 -*-
# @Time : 4/29/23 3:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : gpt_qa.py
import openai
import numpy as np
import json
import re
import time
import pickle
import sys
split_id = sys.argv[1]
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
# by combining speech content, fundamental frequency, speed, and volume information
sys_prompt = """Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers.
Only ask questions about the given speech emotion. No general/background questions. Do not use the given information in the question.
Ask short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.
Only ask questions that have a clear answer based on the given speech. The questions need to be of different types.
Questions can be e.g., what's the emotion of the speaker; why and how emotion is inferred from the speech content, f0, speed, and energy; what can be inferred from speech content and emotion; how speech content is related to the emotion; what is the intent and implicit meaning of the speech, etc.
Format each QA pair in a single line as a JSON dictionary (key "q" for question, and "a" for answer, wrapped with { and }). Do not include any other explanation."""
def generate_prompt(entry):
prompt = """Speech content: "{:s}"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {} speaker; Speech volume: {:s}; Speech speed: {:s}; Speech emotion: {:s}."""
def gender_f0_rank(pitch, gender):
if gender == 'male':
f0_percentiles = [95, 120, 135, 180]
elif gender == 'female':
f0_percentiles = [160, 200, 220, 270]
if pitch < f0_percentiles[0]:
pitch_gender_rank = 'very low (<{:d} Hz)'.format(f0_percentiles[0])
elif pitch < f0_percentiles[1]:
pitch_gender_rank = 'relatively low ({:d}-{:d} Hz)'.format(f0_percentiles[0], f0_percentiles[1])
elif pitch < f0_percentiles[2]:
pitch_gender_rank = 'medium ({:d}-{:d} Hz)'.format(f0_percentiles[1], f0_percentiles[2])
elif pitch < f0_percentiles[3]:
pitch_gender_rank = 'relatively high ({:d}-{:d} Hz)'.format(f0_percentiles[2], f0_percentiles[3])
else:
            pitch_gender_rank = 'very high (>{:d} Hz)'.format(f0_percentiles[3])
return pitch_gender_rank
prompt = prompt.format(entry['text'], entry['gender'], entry['pitch_detail'], entry['pitch'], gender_f0_rank(entry['pitch_detail'], entry['gender']), entry['gender'], entry['energy'], entry['speed'], entry['emotion'])
return prompt
def decode_output(gpt_out):
#print(gpt_out)
gpt_out = gpt_out.replace('\n', '')
# gpt_out = gpt_out.replace("'q'", "\"q\"")
# gpt_out = gpt_out.replace("'a'", "\"a\"")
gpt_out = re.findall(r'\{.*?\}', gpt_out)
qa_list = [json.loads(x) for x in gpt_out]
return qa_list
def save_list_of_lists_to_disk(data, filename):
with open(filename, 'w') as f:
json.dump(data, f, indent=1)
#print(f"List of lists saved to {filename}.")
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(10))
def complete_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def generate_qa(prompt_list, output_file, max_tokens=1024, total_n_completions=1, model_engine="gpt-3.5-turbo"):
model_engine, key_id = model_engine.split('_')[0], model_engine.split('_')[1]
if key_id == 'l':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
elif key_id == 'g':
# TODO: change to your own openai key
openai.api_key = 'your_openai_key'
all_outputs = []
raw_outputs = []
used_token = 0
for prompt_pair in prompt_list:
try:
print(prompt_pair)
audio_id, prompt = prompt_pair[0], prompt_pair[1]
print(sys_prompt + '\n' + prompt)
response = complete_with_backoff(
model=model_engine,
messages=[{"role": "system", "content": sys_prompt},
{"role": "user", "content": prompt + '" {text}"'}], # }
max_tokens=max_tokens,
n=total_n_completions)
cur_completion = response.choices[0].message.content.strip()
raw_outputs.append([cur_completion])
used_token += len(cur_completion)/4 + len(prompt)/4
cur_prompt_outputs = decode_output(cur_completion)
for j in range(len(cur_prompt_outputs)):
new_entry ={}
new_entry['audio_id'] = audio_id
new_entry['instruction'] = cur_prompt_outputs[j]['q']
new_entry['output'] = cur_prompt_outputs[j]['a']
all_outputs.append(new_entry)
#print(new_entry)
if len(all_outputs) % 10 == 0:
print('{:d} questions generated, {:d} tokens'.format(len(all_outputs), int(used_token)))
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
save_list_of_lists_to_disk(raw_outputs, output_file[:-4]+'_raw.json')
except:
pass
with open(output_file, 'w') as f:
json.dump(all_outputs, f, indent=1)
model_list = ['gpt-3.5-turbo-0301_l', 'gpt-3.5-turbo-0301_g', 'gpt-3.5-turbo-0613_l', 'gpt-3.5-turbo-0613_g', "gpt-3.5-turbo_l", "gpt-3.5-turbo_g"]
model_engine= model_list[int(split_id) % len(model_list)]
print(model_engine)
with open('/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/iemocap_emotion_train/{:s}.json'.format(split_id), 'r') as fp:
data = json.load(fp)
print(len(data))
prompt_list = [[data[i]['wav'], generate_prompt(data[i])] for i in range(len(data))]
begin = time.time()
generate_qa(prompt_list, '/data/sls/scratch/yuangong/audiollm/src/data/prep_data_ltue/speech_qa/open_end/datafiles/qa/iemocap_emotion_train_out/iemocap_{:s}.json'.format(split_id), model_engine=model_engine)
end = time.time()
print('time elapsed, ', end-begin) | [
"gender",
"Based on the following speech, generate 20 different types of complex open-ended questions that require step-by-step thinking, and corresponding answers. \nOnly ask questions about the given speech emotion. No general/background questions. Do not use the given information in the question. \nAsk short, crispy, complex, and diverse questions. Answers need to be longer than 10 words.\nOnly ask questions that have a clear answer based on the given speech. The questions need to be of different types.\nQuestions can be e.g., what's the emotion of the speaker; why and how emotion is inferred from the speech content, f0, speed, and energy; what can be inferred from speech content and emotion; how speech content is related to the emotion; what is the intent and implicit meaning of the speech, etc. \nFormat each QA pair in a single line as a JSON dictionary (key \"q\" for question, and \"a\" for answer, wrapped with { and }). Do not include any other explanation.",
"Speech content: \"{:s}\"; Speaker gender: {:s}; Speaker's fundamental frequency (F0) is about {}Hz, so the pitch is {:s} among all people, and is {:s} for a {} speaker; Speech volume: {:s}; Speech speed: {:s}; Speech emotion: {:s}.",
"energy",
"PLACEHOLDER\" {text}\"",
"emotion",
"pitch_detail"
] |
2024-01-10 | daanalytics/Snowflake | python~streamlit~ST_5W1H-ChatGPT.py | import streamlit as st
import os
from dotenv import load_dotenv, find_dotenv
import openai
#import json
import re #regular expressions
# Set AI Model Engine & OpenAI API Key
ai_model_engine = 'gpt-3.5-turbo'
load_dotenv('openai_env.env')
openai.api_key = os.environ.get('OPENAI_API_KEY')
# Set Page Config
st.set_page_config(layout="wide")
st.title('Chatting with ChatGPT using the 5W1H Method')
# Set Side-bar
st.sidebar.image("https://github.com/daanalytics/Snowflake/blob/master/pictures/5W1H_ChatGPT_Blackbox.png?raw=true", use_column_width=True)
st.sidebar.title('5W1H Method')
st.sidebar.markdown(
"""
- **Who?**
- Understanding "who" can help in tailoring the language and complexity of the response.
- **What?**
- Specifying "what" ensures that the AI provides the type and format of information you desire.
- **Where?**
- Defining "where" can help in receiving region or context-specific answers.
- **Why?**
- Knowing "why" can help in determining the depth and angle of the AI's response.
- **When?**
- Framing "when" can help narrow down the context of the information provided.
- **How?**
- Clarifying "how" can guide the AI in structuring its answer in the most useful way.
"""
)
def main():
# 1. Collecting user input according to the 5W1H Method
# 2. Sending user input to ChatGPT function
# 3. Display the ChatGPT response
# Variable Who --> audience?
who = st.text_area('Who', help='Are you aiming the prompt for a software developer, a student, a researcher, or a layperson?')
# Variable What --> what should be done?
    what = st.text_area('What', help='Are you looking for a detailed explanation, a summary, code, or maybe a list?')
# Variable Where --> where will the output be used?
    where = st.text_area('Where', help="Are you asking about a concept's application in a specific country, industry, or environment?")
# Variable Why --> what's the goal?
why = st.text_area('Why', help='Are you asking because you want a deep understanding, or are you trying to compare concepts?')
# Variable When --> when will 'it' happen?
when = st.text_area('When', help='Are you asking about historical events, contemporary issues, or future predictions?')
# Variable How --> style, structure, length, use of language, etc.
how = st.text_area('How', help='Are you seeking a step-by-step guide, an overview, or perhaps a methodological explanation?')
prompt_output = who + ' ' + what + ' ' + where + ' ' + why + ' ' + when + ' ' + how
# Submit Button
form = st.form(key='5W1H_form')
submit = form.form_submit_button(label='Submit')
if submit:
# Submit the prompt output to ChatGPT
openai_response = get_openai_response(prompt_output)
# Regular expression to extract topics and descriptions
pattern = r'\d+\.\s(.*?):\s(.*?)(?=\d+\.|$)'
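        # Expects the reply as a numbered list ("1. Topic: description ..."): each match captures
        # the topic before the colon and the description lazily up to the next numbered item.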
topics_and_descriptions = re.findall(pattern, openai_response, re.S)
topic_num = 0
for topic, description in topics_and_descriptions:
topic_num = topic_num + 1
st.markdown('**'+str(topic_num)+'**' + ': ' + topic + ' - ' + description)
#st.json(openai_reponse)
def get_openai_response(prompt_output):
# Use this function to generate a ChatGPT response
# 1. Submit the prompt_output to the OpenAI API
# 2. Return the chat response
# This endpoint might change depending on OpenAI's updates
endpoint = "https://api.openai.com/v1/chat/completions"
headers = {
"Authorization": f"Bearer {openai.api_key}",
"Content-Type": "application/json",
}
chat_completion = openai.ChatCompletion.create(
model=ai_model_engine,
messages=[
{"role": "system", "content": "You are a community manager, skilled in writing informative blogposts about the subject area of Data & AI in general and the Snowflake Data Cloud in specific."},
{"role": "user", "content": prompt_output}
]
)
#openai_reponse = json.loads(chat_completion['choices'][0]['message']['content'])
openai_response = chat_completion['choices'][0]['message']['content']
#try:
# json_content = json.loads(openai_reponse)
#except json.JSONDecodeError:
# print(f"Failed to decode JSON from content: {openai_reponse}")
# # Handle error, e.g., set json_content to a default value or take some corrective action
# json_content = {}
return openai_response
# Execute the main function
main() | [
"You are a community manager, skilled in writing informative blogposts about the subject area of Data & AI in general and the Snowflake Data Cloud in specific.",
" "
] |
2024-01-10 | daanalytics/Snowflake | python~streamlit~chatgpt_cv~pages~chatgpt_cv_main.py | # Importing required packages
import streamlit as st
import openai
import os
# NLP Packages
from textblob import TextBlob
import spacy
from gensim.summarization.summarizer import summarize
import nltk
nltk.download('punkt')
# Sumy Summary Packages
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
st.title("Chatting with ChatGPT")
st.sidebar.header("Instructions")
st.sidebar.info(
'''This is a web application that allows you to interact with
the OpenAI API's implementation of the ChatGPT model.
Enter a **query** in the **text box** and **press enter** to receive
a **response** from the ChatGPT
App is based on the following blogs:
https://blog.devgenius.io/building-a-chatgpt-web-app-with-streamlit-and-openai-a-step-by-step-tutorial-1cd57a57290b
'''
)
# Set the model engine and your OpenAI API key
model_engine = "text-davinci-003"
openai.api_key = "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" #follow step 4 to get a secret_key
def main():
'''
This function gets the user input, pass it to ChatGPT function and
displays the response
'''
# Get user input
user_query = st.text_input("Enter query here, to exit enter :q", "what is Python?")
if user_query != ":q" or user_query != "":
# Pass the query to the ChatGPT function
response = ChatGPT(user_query)
return st.write(f"{user_query} {response}")
def ChatGPT(user_query):
'''
This function uses the OpenAI API to generate a response to the given
user_query using the ChatGPT model
'''
# Use the OpenAI API to generate a response
completion = openai.Completion.create(
engine = model_engine,
prompt = user_query,
max_tokens = 1024,
n = 1,
temperature = 0.5,
)
response = completion.choices[0].text
return response
# call the main function
main()
| [] |
2024-01-10 | mivanovitch/Earning-Sage | retrivalQA.py | import sys
from dotenv import load_dotenv
load_dotenv()
from langchain.llms import OpenAI
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
import openai
import os
openai.api_base = os.environ["OPENAI_API_BASE"]
openai.api_key = os.environ["OPENAI_API_KEY"]
os.environ["LANGCHAIN_API_KEY"] = os.environ["LANGCHAIN_API_KEY"]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.langchain.plus"
os.environ["LANGCHAIN_SESSION"] = os.environ["LANGCHAIN_SESSION"]
target_file = "./earning_reports/AAPL-89728-report.tsv"
def create_retriever(target_file):
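    # Load the earnings-call transcript TSV, split it into ~1024-token chunks, embed the chunks
    # with OpenAI embeddings, and expose the resulting Chroma index as a retriever.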
loader = CSVLoader(target_file, csv_args={ 'delimiter': '\t' })
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=1024, chunk_overlap=0
)
docs = loader.load_and_split(text_splitter=text_splitter)
embeddings = OpenAIEmbeddings()
db = Chroma.from_documents(docs, embeddings)
return db.as_retriever()
def create_qa_retrival_chain():
foo_retriever = create_retriever(target_file)
llm = OpenAI(temperature=0)
qa = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=foo_retriever
)
return qa
def main():
if len(sys.argv) < 2:
print("Usage: python retrivalQA.py file_name")
return
argument = sys.argv[1]
print(f"Reading question list from: {argument}")
print('Loading LLM from', openai.api_base)
retrival_chain = create_qa_retrival_chain()
with open(argument, 'r') as file:
for line in file:
# Remove the newline character at the end of each line
line = line.strip()
if line == '':
continue
print("Questions :", line)
response = retrival_chain.run(line)
print("Answer :", response)
if __name__ == '__main__':
main() | [] |
2024-01-10 | baileyg2016/dribbleDigest | store.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.schema import Document
import os
import openai  # needed by generate_prompts() below
from dotenv import load_dotenv
import json
from article import Article
load_dotenv()
# Load the JSON data
with open('samples/sample-articles.json', 'r') as f:
data = json.load(f)
# Create Article objects
articles = [Article(**article) for article in data]
documents = [
Document(
page_content=str(article),
metadata=article.to_dict()
)
for article in articles
]
embeddings = OpenAIEmbeddings()
# article_embeddings = [embeddings.(article.text) for article in articles]
vectorstore = Chroma.from_documents(documents, embeddings)
# vectorstore = Chroma.from_embeddings(article_embeddings)
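# For each user prompt, fetch the 5 nearest articles from the Chroma store by embedding
# similarity and return their metadata, keyed by the prompt text.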
def find_best_articles(prompts):
interesting_articles = {}
for prompt in prompts:
docs = vectorstore.similarity_search(prompt, k=5)
interesting_articles[prompt] = [doc.metadata for doc in docs]
return interesting_articles
prompts = ["49ers"]
interesting_articles = find_best_articles(prompts)
print('Total articles:', len(articles), '\n\n\n\n')
for prompt, articles in interesting_articles.items():
print(prompt, '\n')
for article in articles:
print(article['title'])
print('\n')
print('\n\n\n\n')
def generate_prompts():
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Generate some prompts related to NBA."}
]
)
return response['choices'][0]['message']['content'].split(', ')
# print(interesting_articles) | [
"['49ers']",
"You are a helpful assistant.",
"Generate some prompts related to NBA."
] |
2024-01-10 | baileyg2016/dribbleDigest | playground.py | from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import Chroma
from langchain.retrievers import SVMRetriever
import json
from article import Article
from dotenv import load_dotenv
load_dotenv()
# Load the JSON data
with open('samples/sample-articles.json', 'r') as f:
data = json.load(f)
# Create Article objects
articles = [Article(**article) for article in data]
prompt_template = "What are the top 3 most interesting articles for someone who likes the NBA?"
documents = [
Document(
page_content=str(article),
metadata=article.to_dict(),
)
for article in articles
]
retriever = SVMRetriever.from_texts(
[article.title for article in articles],
OpenAIEmbeddings(),
)
embeddings = OpenAIEmbeddings()
# article_embeddings = [embeddings.(article.text) for article in articles]
vectorstore = Chroma.from_documents(documents, embeddings)
llm = OpenAI(temperature=0, model='gpt-4')
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template),
retriever=retriever,
)
llm_chain.run(prompt_template)
# from langchain.document_loaders import TextLoader
# from langchain.text_splitter import CharacterTextSplitter
# from langchain.vectorstores import FAISS
# from langchain.embeddings import OpenAIEmbeddings
# from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent
# from langchain.chat_models import ChatOpenAI
# from langchain.agents import AgentExecutor
# # Set up the retriever
# loader = TextLoader('path/to/your/documents.txt')
# documents = loader.load()
# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# texts = text_splitter.split_documents(documents)
# embeddings = OpenAIEmbeddings()
# db = FAISS.from_documents(texts, embeddings)
# retriever = db.as_retriever()
# # Create a retriever tool
# tool = create_retriever_tool(
# retriever,
# "retriever_name",
# "Description of the retriever"
# )
# tools = [tool]
# # Construct the agent
# llm = ChatOpenAI(temperature=0)
# agent_executor = create_conversational_retrieval_agent(llm, tools, verbose=True)
# # Test the agent
# result = agent_executor({"input": "your_input_here"})
| [
"What are the top 3 most interesting articles for someone who likes the NBA?"
] |
2024-01-10 | ymahlau/real_time_rl_project | src~envs~custom_lunar_lander.py | import time
from typing import Tuple
import gym
import numpy as np
import random
import pygame
from pygame import gfxdraw
# physics constants
GRAVITY = 1.62 # [m/s^2] moon gravity
UP_ACCELERATION = 2.5 # [m/s^2] acc of thruster upwards
SIDE_ACCELERATION = 2 # [m/s^2] acc of thruster sideways
PLAYGROUND_WIDTH = 200 # [m]
PLAYGROUND_HEIGHT = 100 # [m]
LANDING_PAD_WIDTH = 30
CRASH_THRESHOLD_X = 2 # [m/s] terminal sideways velocity on impact, which would make spacecraft fall over
CRASH_THRESHOLD_Y = 6 # [m/s] terminal velocity on impact
START_OFFSET_X = 10 # this outer region cannot be x start position
START_OFFSET_Y = 20 # start this offset lower than max height
# render constants
SCREEN_WIDTH = 750
SCREEN_HEIGHT = 500
EXPECTED_FPS = 100
OOB_REWARD = -50 # reward for out of bounds
SUCCESS_REWARD = 210
PAD_DISTANCE_REWARD = -3 # factor for landing closer to center
LONG_TIME_REWARD = -50 # reward if max time is elapsed
MAX_PLAY_TIME = 50
# Additional Rewards not used because it made the environment too difficult
CRASH_REWARD_X = 0 # reward for crashing by having too high sideways velocity on impact
CRASH_REWARD_Y = 0 # reward for crashing by falling to fast
SMOOTH_LANDING_FACTOR = 0 # Factor for rewarding smooth landing
BOOSTER_REWARD_FACTOR = 0 # Factor for constant negative reward while playing
class CustomLunarLander(gym.Env):
"""
Land a Lunar Lander on a landing pad on the moon. Variation of LunarLander-v2 from OpenAI-Gym:
https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
The task is to land on a landing pad at position [0, 0] with a low falling velocity, while the lunar lander
is starting at a random x-position in the air.
Observation Space:
The observation space has entries in the following order:
0. x position in [-PLAYGROUND_WIDTH / 2, PLAYGROUND_WIDTH / 2]
1. y position in [0, PLAYGROUND_HEIGHT]
2. x velocity in [-inf, +inf], where right is positive direction
3. y velocity in [-inf, +inf], where up is positive direction
Action Space:
There are the following possible actions (all integer):
0. Do nothing
1. Thrust Left
2. Thrust Up
3. Thrust Right
Reward Space:
The only positive reward is obtained by hitting the landing pad. There is negative reward for crashing due to
out of bounds, crashing due to hitting ground with too high x or y velocity (set by threshold).
Additionally, a smooth negative reward proportional to the terminal x and y velocity is present.
A negative reward for using booster incentivizes the agent to finish fast.
"""
def __init__(self, step_size: float):
# gym
self.action_space = gym.spaces.Discrete(4)
self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(4,), dtype=np.float32)
self.reward_range = (float('-inf'), 200)
self.state = None
self.step_size = step_size # seconds
self.num_steps = 0
# GUI only
self.screen = None
self.saved_action = None
self.frame = None
def reset(self) -> np.ndarray:
self.state = np.asarray(
[
random.randint(int(-PLAYGROUND_WIDTH / 2) + START_OFFSET_X,
int(PLAYGROUND_WIDTH / 2) - START_OFFSET_X),
PLAYGROUND_HEIGHT - START_OFFSET_Y,
0,
0,
], dtype=float)
self.num_steps = 0
return self.state
def step(self, action: int) -> Tuple[list, float, bool, dict]:
if self.state is None:
raise AssertionError("Environment is not reset yet.")
self.saved_action = action
# update velocities
if action == 1: # left
self.state[2] -= SIDE_ACCELERATION * self.step_size
elif action == 2: # Up
self.state[3] += UP_ACCELERATION * self.step_size
elif action == 3: # right
self.state[2] += SIDE_ACCELERATION * self.step_size
self.state[3] -= GRAVITY * self.step_size # gravity pulls down
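        # Semi-implicit Euler integration: velocities are updated first, then the position
        # update below uses the already-updated velocities for this time step.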
# update positions
self.state[0] += self.step_size * self.state[2]
self.state[1] += self.step_size * self.state[3]
# check termination conditions
# left or right out of bounds
if self.state[0] < -PLAYGROUND_WIDTH / 2 or self.state[0] > PLAYGROUND_WIDTH / 2:
return self.state, OOB_REWARD, True, {}
# Above playground
if self.state[1] > PLAYGROUND_HEIGHT:
return self.state, OOB_REWARD, True, {}
# Landed
reward = 0
info = ''
if action != 0:
reward += BOOSTER_REWARD_FACTOR * self.step_size
if self.state[1] <= 0:
# test if landed on pad
if -LANDING_PAD_WIDTH / 2 < self.state[0] < LANDING_PAD_WIDTH / 2:
reward += SUCCESS_REWARD
info += ' -success'
# test if we crashed
if self.state[2] > CRASH_THRESHOLD_X or self.state[2] < -CRASH_THRESHOLD_X:
reward += CRASH_REWARD_X
info += ' -x_crash'
if self.state[3] < -CRASH_THRESHOLD_Y:
reward += CRASH_REWARD_Y
info += ' -y_crash'
# Additional smooth reward for landing softly in x and y direction, EDIT: only y
# reward += SMOOTH_LANDING_FACTOR * abs(self.state[2])
reward += SMOOTH_LANDING_FACTOR * abs(self.state[3])
reward += PAD_DISTANCE_REWARD * abs(self.state[0])
return self.state, reward, True, {'info_str': info}
# Test if max play time is over
if self.num_steps * self.step_size > MAX_PLAY_TIME:
reward += LONG_TIME_REWARD
reward += PAD_DISTANCE_REWARD * abs(self.state[0])
return self.state, reward, True, {}
# Nothing happened
self.num_steps += 1
return self.state, reward, False, {}
    def render(self, mode: str = 'human'):
        # lazily create the window once; self.screen doubles as the "display initialized" flag
        if self.screen is None:
            pygame.init()
            self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
canvas = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
canvas.fill((0, 0, 0))
xr = SCREEN_WIDTH / PLAYGROUND_WIDTH
yr = SCREEN_HEIGHT / PLAYGROUND_HEIGHT
# Lunar lander
size_on_screen = SCREEN_WIDTH / 40
gfxdraw.filled_circle(
canvas,
int((self.state[0] + PLAYGROUND_WIDTH / 2) * xr),
SCREEN_HEIGHT - int(self.state[1] * yr),
int(size_on_screen),
(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
)
# Exhaustion gases
if self.saved_action != 0:
offset_x = 0
offset_y = 0
if self.saved_action == 1:
offset_x = 3 / 2 * size_on_screen
elif self.saved_action == 2:
offset_y = 3 / 2 * size_on_screen
elif self.saved_action == 3:
offset_x = -3 / 2 * size_on_screen
gfxdraw.filled_circle(
canvas,
int((self.state[0] + PLAYGROUND_WIDTH / 2) * xr) + int(offset_x),
SCREEN_HEIGHT - int(self.state[1] * yr) + int(offset_y),
int(size_on_screen / 4),
(255, 255, 255),
)
# Landing pad
gfxdraw.filled_circle(
canvas,
int(LANDING_PAD_WIDTH / 2 * xr) + int(PLAYGROUND_WIDTH / 2 * xr),
SCREEN_HEIGHT,
int(SCREEN_WIDTH / 80),
(255, 0, 0),
)
gfxdraw.filled_circle(
canvas,
int(-LANDING_PAD_WIDTH / 2 * xr) + int(PLAYGROUND_WIDTH / 2 * xr),
SCREEN_HEIGHT,
int(SCREEN_WIDTH / 80),
(255, 0, 0),
)
# Finish
self.screen.blit(canvas, (0, 0))
pygame.display.flip()
time.sleep(1 / EXPECTED_FPS)
    def close(self):
        if self.screen is not None:
            pygame.quit()
            self.screen = None
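# --- Illustrative usage sketch (not part of the original file) ---
# A random-policy rollout using only the API documented in the class docstring;
# the step_size value below is an arbitrary example.
if __name__ == "__main__":
    env = CustomLunarLander(step_size=0.1)
    obs = env.reset()
    done, episode_return = False, 0.0
    while not done:
        action = env.action_space.sample()  # random action in {0, 1, 2, 3}
        obs, reward, done, info = env.step(action)
        episode_return += reward
    print("episode return:", episode_return)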
| [] |
2024-01-10 | evilpan/gptcli | gptcli.py | #!/usr/bin/env python3
import os
import enum
import json
import inspect
import argparse
import datetime
import requests
from functools import partial
from argparse import Namespace
from typing import List
from rich.console import Console
from rich.markdown import Markdown
from rich.live import Live
from rich.table import Table
import cmd2
from cmd2 import argparse_custom, with_argparser, Settable
import openai
class ContextLevel(enum.Enum):
NONE = 0
REQUEST = 1
FULL = 2
class Config:
sep = Markdown("---")
baseDir = os.path.dirname(os.path.realpath(__file__))
default = os.path.join(baseDir, "config.json")
mdSep = '\n\n' + '-' * 10 + '\n'
encodings = ["utf8", "gbk"]
def __init__(self, file=None) -> None:
self.cfg = {}
if file:
self.load(file)
def load(self, file):
with open(file, "r") as f:
self.cfg = json.load(f)
c: dict = self.cfg
self.api_key = c.get("api_key") or openai.api_key
self.api_base = c.get("api_base") or openai.api_base
self.api_type = c.get("api_type") or openai.api_type
self.api_version = c.get("api_version") or openai.api_version
self.api_organization = c.get("api_organization") or openai.organization
self.model = c.get("model", "gpt-3.5-turbo")
self.prompt = c.get("prompt", [])
self.stream = c.get("stream", False)
self.stream_render = c.get("stream_render", False)
self.context = ContextLevel(c.get("context", 0))
self.proxy = c.get("proxy", "")
self.showtokens = c.get("showtokens", False)
def get(self, key, default=None):
return self.cfg.get(key, default)
class GptCli(cmd2.Cmd):
prompt = "gptcli> "
def __init__(self, config):
super().__init__(
allow_cli_args=False,
allow_redirection=False,
shortcuts={},
persistent_history_file=os.path.expanduser("~/.gptcli_history"),
)
self.aliases[".exit"] = ".quit"
self.aliases[".config"] = ".set"
self.doc_header = "gptcli commands (use '.help -v' for verbose/'.help <topic>' for details):"
self.hidden_commands = [
"._relative_run_script", ".run_script", ".run_pyscript",
".eof", ".history", ".macro", ".shell", ".shortcuts", ".alias"]
for sk in ["allow_style", "always_show_hint", "echo", "feedback_to_output",
"max_completion_items", "quiet", "timing"]:
self.remove_settable(sk)
self.console = Console()
self.session = []
# Init config
self.print("Loading config from:", config)
self.config = Config(config)
for opt in ["key", "base", "type", "version", "organization"]:
opt = f"api_{opt}"
val = getattr(self.config, opt)
setattr(openai, opt, val)
if opt == "api_key" and len(val) > 7:
val = val[:7] + "*" * 5
self.print(f"openai.{opt}={val}")
if self.config.proxy:
self.print("Proxy:", self.config.proxy)
openai.proxy = self.config.proxy
self.print("Context level:", self.config.context)
self.print("Stream mode:", self.config.stream)
# Init settable
# NOTE: proxy is not settable in runtime since openai use pre-configured session
self.add_settable(Settable("api_key", str, "OPENAI_API_KEY", self.config, onchange_cb=self.openai_set))
self.add_settable(Settable("api_base", str, "OPENAI_API_BASE", self.config, onchange_cb=self.openai_set))
self.add_settable(Settable("api_type", str, "OPENAI_API_TYPE", self.config, onchange_cb=self.openai_set,
choices=("open_ai", "azure", "azure_ad", "azuread")))
self.add_settable(Settable("api_version", str, "OPENAI_API_VERSION", self.config, onchange_cb=self.openai_set))
self.add_settable(Settable("api_organization", str, "OPENAI_API_ORGANIZATION", self.config, onchange_cb=self.openai_set))
self.add_settable(Settable("context", lambda v: ContextLevel(int(v)), "Session context mode",
self.config, completer=partial(cmd2.Cmd.basic_complete, match_against="012")))
self.add_settable(Settable("stream", bool, "Enable stream mode", self.config))
self.add_settable(Settable("stream_render", bool, "Render live markdown in stream mode", self.config))
self.add_settable(Settable("model", str, "OPENAI model", self.config))
self.add_settable(Settable("showtokens", bool, "Show tokens used with the output", self.config))
# MISC
with self.console.capture() as capture:
self.print(f"[bold yellow]{self.prompt}[/]", end="")
self.prompt = capture.get()
self.single_tokens_used = 0
self.total_tokens_used = 0
def openai_set(self, param, old, new):
# self.print(f"openai.{param} = {old} -> {new}")
setattr(openai, param, new)
def onecmd_plus_hooks(self, line: str, *args, **kwargs) -> bool:
"""
Dirty hack to use Cmd2 as chat console, and avoid statement parsing
for chat input which may result in `No closing quotation` error.
"""
if line.startswith("."):
return super().onecmd_plus_hooks(line, *args, **kwargs)
self.handle_input(line)
return False
def default(self, statement: cmd2.Statement):
"""
        For user input that starts with "." but is not a recognized command,
        treat it as chat instead of printing an error message.
"""
self.handle_input(statement.raw)
def cmd_func(self, command: str):
"""
        Another hack to make commands start with "." while keeping tab completion working
"""
if command.startswith("."):
command = command[1:]
return super().cmd_func(command)
if inspect.currentframe().f_back.f_code.co_name == "_register_subcommands":
return super().cmd_func(command)
return None
def get_all_commands(self) -> List[str]:
return list(map(lambda c: f".{c}", super().get_all_commands()))
def print(self, *msg, **kwargs):
self.console.print(*msg, **kwargs)
def handle_input(self, content: str):
if not content:
return
self.session.append({"role": "user", "content": content})
if self.config.stream:
answer = self.query_openai_stream(self.messages)
else:
answer = self.query_openai(self.messages)
if not answer:
self.session.pop()
else:
self.session.append({"role": "assistant", "content": answer})
if self.config.showtokens:
self.console.log(f"Tokens used: {self.single_tokens_used}")
@property
def messages(self):
msgs = []
msgs.extend(self.config.prompt)
if self.config.context == ContextLevel.FULL:
msgs.extend(self.session)
elif self.config.context == ContextLevel.REQUEST:
msgs.extend([s for s in self.session if s["role"] != "assistant"])
else: # NO Context
msgs.append(self.session[-1])
return msgs
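    # Illustrative note (not from the original source): with context == ContextLevel.REQUEST only
    # the configured prompt plus previous *user* messages are re-sent, e.g.
    #   [{"role": "system", ...}, {"role": "user", "content": "q1"}, {"role": "user", "content": "q2"}],
    # FULL also re-sends the assistant replies, and NONE sends just the prompt plus the latest message.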
def load_session(self, file, mode="md", encoding=None, append=False):
if not append:
self.session.clear()
with open(file, "r", encoding=encoding) as f:
data = f.read()
if mode == "json":
self.session.extend(json.loads(data))
elif mode == "md":
for chat in data.split(Config.mdSep):
role, content = chat.split(": ", 1)
self.session.append({"role": role, "content": content})
self.print("Load {} records from {}".format(len(self.session), file))
def save_session(self, file, mode="md", encoding=None):
self.print("Save {} records to {}".format(len(self.session), file))
if mode == "json":
data = json.dumps(self.session, indent=2)
elif mode == "md":
chats = ["{}: {}".format(chat["role"], chat["content"])
for chat in self.session]
data = Config.mdSep.join(chats)
with open(file, "w", encoding=encoding) as f:
f.write(data)
# Reference:
# https://platform.openai.com/docs/guides/chat/managing-tokens
def num_tokens_from_messages(self, messages):
"""Returns the number of tokens used by a list of messages."""
import tiktoken
model = self.config.model
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
if model not in ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]: # note: future models may deviate from this
self.print(f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
num_tokens = 0
for message in messages:
num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += -1 # role is always required and always 1 token
num_tokens += 2 # every reply is primed with <im_start>assistant
return num_tokens
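    # Worked example (illustrative): for a single message {"role": "user", "content": "Hello"} the count is
    # 4 (message framing) + tokens("user") + tokens("Hello") + 2 (reply priming),
    # typically 8 tokens with the cl100k_base encoding.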
def query_openai(self, messages) -> str:
try:
response = openai.ChatCompletion.create(
model=self.config.model,
messages=messages
)
content = response["choices"][0]["message"]["content"]
self.print(Markdown(content), Config.sep)
self.single_tokens_used = response["usage"]["total_tokens"]
self.total_tokens_used += self.single_tokens_used
return content
except openai.error.OpenAIError as e:
self.print("OpenAIError:", e)
return ""
def query_openai_stream(self, messages) -> str:
answer = ""
try:
response = openai.ChatCompletion.create(
model=self.config.model,
messages=messages,
stream=True)
with Live(auto_refresh=False, vertical_overflow="visible") as lv:
for part in response:
finish_reason = part["choices"][0]["finish_reason"]
if "content" in part["choices"][0]["delta"]:
content = part["choices"][0]["delta"]["content"]
answer += content
if self.config.stream_render:
lv.update(Markdown(answer), refresh=True)
else:
lv.update(answer, refresh=True)
elif finish_reason:
if answer:
lv.update(Markdown(answer), refresh=True)
except KeyboardInterrupt:
self.print("Canceled")
except openai.error.OpenAIError as e:
self.print("OpenAIError:", e)
answer = ""
self.print(Config.sep)
self.single_tokens_used = self.num_tokens_from_messages(messages + [{"role": "assistant", "content": answer}])
self.total_tokens_used += self.single_tokens_used
return answer
parser_ml = argparse_custom.DEFAULT_ARGUMENT_PARSER()
@with_argparser(parser_ml)
def do_multiline(self, args):
"input multiple lines, end with ctrl-d(Linux/macOS) or ctrl-z(Windows). Cancel with ctrl-c"
contents = []
while True:
try:
line = input("> ")
except EOFError:
self.print("--- EOF ---")
break
except KeyboardInterrupt:
self.print("^C")
return
contents.append(line)
self.handle_input("\n".join(contents))
parser_reset = argparse_custom.DEFAULT_ARGUMENT_PARSER()
@with_argparser(parser_reset)
def do_reset(self, args):
"Reset session, i.e. clear chat history"
self.session.clear()
self.print("session cleared.")
parser_prompt = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_prompt.add_argument("-c", dest="clear", action="store_true", help="remove current prompt")
parser_prompt.add_argument("file", nargs="?", help="prompt file to load, can be plaintext or json format",
completer=cmd2.Cmd.path_complete)
@with_argparser(parser_prompt)
def do_prompt(self, args: Namespace):
"Load different prompts"
if args.clear:
self.config.prompt.clear()
self.print("Prompt cleared.")
elif args.file:
prompt = []
if args.file.endswith(".json"):
self.print("Load prompt from json")
with open(args.file, "r") as f:
data = json.load(f)
if isinstance(data, list):
prompt.extend(data)
elif isinstance(data, dict):
prompt.append(data)
else:
self.print("Load prompt from text")
with open(args.file, "r") as f:
data = f.read().rstrip()
prompt.append(
{ "role": "system", "content": data }
)
self.print("Prompt loaded:", json.dumps(prompt, indent=2, ensure_ascii=False))
self.config.prompt = prompt
else:
self.print("Current prompt:", json.dumps(self.config.prompt, indent=2, ensure_ascii=False))
parser_save = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_save.add_argument("-m", dest="mode", choices=["json", "md"],
default="md", help="save as json or markdown (default: md)")
parser_save.add_argument("-e", dest="encoding", choices=Config.encodings,
default=Config.encodings[0], help="file encoding")
parser_save.add_argument("file", help="target file to save",
completer=cmd2.Cmd.path_complete)
@with_argparser(parser_save)
def do_save(self, args: Namespace):
"Save current conversation to Markdown/JSON file"
self.save_session(args.file, args.mode, args.encoding)
parser_load = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_load.add_argument("-a", dest="append", action="store_true",
help="append to current chat, by default current chat will be cleared")
parser_load.add_argument("-m", dest="mode", choices=["json", "md"],
default="md", help="load as json or markdown (default: md)")
parser_load.add_argument("-e", dest="encoding", choices=Config.encodings,
default=Config.encodings[0], help="file encoding")
parser_load.add_argument("file", help="target file to load",
completer=cmd2.Cmd.path_complete)
@with_argparser(parser_load)
def do_load(self, args: Namespace):
"Load conversation from Markdown/JSON file"
self.load_session(args.file, args.mode, args.encoding, args.append)
parser_usage = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_usage.add_argument("-d", dest="days", type=int,
help="print usage of last n days")
parser_usage.add_argument("-b", dest="billing", action="store_true",
help="print detail of the billing subscription")
@with_argparser(parser_usage)
def do_usage(self, args: Namespace):
"Tokens usage of current session / last N days, or print detail billing info"
if args.days is None and not args.billing:
self.print(f"Total tokens used this session: {self.total_tokens_used}")
return
headers = {"Authorization": f"Bearer {self.config.api_key}"}
proxies = {}
if self.config.proxy:
proxies["http"] = self.config.proxy
proxies["https"] = self.config.proxy
if args.days:
end_date = datetime.datetime.now()
start_date = end_date - datetime.timedelta(args.days)
url = f"{self.config.api_base}/dashboard/billing/usage"
params = {
"start_date": str(start_date.date()),
"end_date": str(end_date.date()),
}
resp = requests.get(url, params=params, headers=headers, proxies=proxies)
if resp.status_code != 200 or "json" not in resp.headers["content-type"]:
self.print("Failed to get usage:", resp.status_code, resp.text)
return
js = resp.json()
daily_costs = js.get("daily_costs")
if not daily_costs:
self.print("json error:", js)
return
table = Table()
for i, cost in enumerate(daily_costs):
line_items = cost.get("line_items", [])
if i == 0:
table.add_column("time")
for item in line_items:
table.add_column(item["name"])
row = [datetime.datetime.fromtimestamp(cost["timestamp"])] + [item["cost"] for item in line_items]
table.add_row(*list(map(str, row)))
self.print(table)
self.print("total_usage", js.get("total_usage"))
elif args.billing:
url = f"{self.config.api_base}/dashboard/billing/subscription"
resp = requests.get(url, headers=headers, proxies=proxies)
self.console.print_json(resp.text)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-c", dest="config", help="path to config.json", default=Config.default)
args = parser.parse_args()
app = GptCli(args.config)
app.cmdloop()
if __name__ == '__main__':
main()
| [
"gptcli> ",
"[]"
] |
2024-01-10 | evilpan/gptcli | tests~test_live.py | #!/usr/bin/env python3
import time
import asyncio
from rich.console import Console
from rich.markdown import Markdown, MarkdownIt
from rich.live import Live
text = """\
As an AI language model, I don't have the ability to directly perform syntax highlighting, but I can show you an example of how to use Markdown syntax for code blocks:
```python
def welcome(name):
print(f"Hello, {name}!")
# Call the function
welcome("Alice")
```
This creates a code block with syntax highlighting for Python code.
To make this work, you need to include the name of the programming language immediately after the first set of backticks.
In this case, we've specified that we're writing Python code by including `python` after the backticks.
"""
c = Console()
def test_live():
md = Markdown("")
parser = MarkdownIt().enable("strikethrough")
parts = [text[i:i+5] for i in range(0, len(text), 5)]
with Live(md, refresh_per_second=4): # update 4 times a second to feel fluid
for part in parts:
time.sleep(0.1)
md.markup += part
md.parsed = parser.parse(md.markup)
"""
First:
{
"choices": [
{
"delta": {
"role": "assistant"
},
"finish_reason": null,
"index": 0
}
],
"created": 1677894010,
"id": "chatcmpl-6qBAYUFOkHB47std83P0djYO1DZHq",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk"
}
Middle:
{
"choices": [
{
"delta": {
"content": " zipfile"
},
"finish_reason": null,
"index": 0
}
],
...
}
Last:
{
"choices": [
{
"delta": {},
"finish_reason": "stop",
"index": 0
}
],
...
}
"""
async def test_stream():
import openai
openai.api_key = open(".key").read().strip()
# NOTE: aiohttp does not support socks5 proxy yet
# openai.proxy = "http://127.0.0.1:1080"
messages = [
{ "role": "system", "content": "Use triple backticks with the language name for every code block in your markdown response, if any." },
{ "role": "user", "content": "python example to unzip file to dir" },
]
async for part in await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=messages,
stream=True
):
finish_reason = part["choices"][0]["finish_reason"]
if "content" in part["choices"][0]["delta"]:
content = part["choices"][0]["delta"]["content"]
print(content, end="")
elif finish_reason:
print(finish_reason)
if __name__ == '__main__':
asyncio.run(test_stream()) | [
"Use triple backticks with the language name for every code block in your markdown response, if any.",
"python example to unzip file to dir"
] |
2024-01-10 | vaishnavijadhav1102/LogGPT | localGPT~run_localGPT.py | import logging
import click
import torch
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from auto_gptq import AutoGPTQForCausalLM
from huggingface_hub import hf_hub_download
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline, LlamaCpp
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.agents import Tool
from langchain.agents import AgentType
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.utilities import SerpAPIWrapper
from langchain.agents import initialize_agent
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from langchain.agents import AgentType
from langchain.agents import initialize_agent, Tool
from constants import CHROMA_SETTINGS,CHROMA_SETTINGS_LOG, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY,LOG_DIRECTORY,PERSIST_LOG
def load_model(device_type, model_id, model_basename=None):
"""
Select a model for text generation using the HuggingFace library.
If you are running this for the first time, it will download a model for you.
    Subsequent runs will use the model from the disk.
Args:
device_type (str): Type of device to use, e.g., "cuda" for GPU or "cpu" for CPU.
model_id (str): Identifier of the model to load from HuggingFace's model hub.
model_basename (str, optional): Basename of the model if using quantized models.
Defaults to None.
Returns:
HuggingFacePipeline: A pipeline object for text generation using the loaded model.
Raises:
ValueError: If an unsupported model or device type is provided.
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
if ".ggml" in model_basename:
logging.info("Using Llamacpp for GGML quantized models")
model_path = hf_hub_download(repo_id=model_id, filename=model_basename)
max_ctx_size = 2048
kwargs = {
"model_path": model_path,
"n_ctx": max_ctx_size,
"max_tokens": max_ctx_size,
}
if device_type.lower() == "mps":
kwargs["n_gpu_layers"] = 1000
if device_type.lower() == "cuda":
kwargs["n_gpu_layers"] = 1000
kwargs["n_batch"] = max_ctx_size
return LlamaCpp(**kwargs)
else:
# The code supports all huggingface models that ends with GPTQ and have some variation
# of .no-act.order or .safetensors in their HF repo.
logging.info("Using AutoGPTQForCausalLM for quantized models")
if ".safetensors" in model_basename:
# Remove the ".safetensors" ending if present
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
logging.info("Tokenizer loaded")
model = AutoGPTQForCausalLM.from_quantized(
model_id,
model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
use_triton=False,
quantize_config=None,
)
elif (
device_type.lower() == "cuda"
): # The code supports all huggingface models that ends with -HF or which have a .bin
# file in their HF repo.
logging.info("Using AutoModelForCausalLM for full models")
tokenizer = AutoTokenizer.from_pretrained(model_id)
logging.info("Tokenizer loaded")
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
# max_memory={0: "15GB"} # Uncomment this line with you encounter CUDA out of memory errors
)
model.tie_weights()
else:
logging.info("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co/docs/transformers/
# main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=2048,
temperature=0,
top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
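# Illustrative usage sketch (not part of the original file); the model id and basename below are just
# examples of the GGML-style checkpoints this function supports:
#   llm = load_model("cuda", model_id="TheBloke/Llama-2-7B-Chat-GGML",
#                    model_basename="llama-2-7b-chat.ggmlv3.q4_0.bin")
#   print(llm("Summarize this web server log entry ..."))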
# choose device type to run on as well as whether to show source documents.
@click.command()
@click.option(
"--device_type",
default="cuda" if torch.cuda.is_available() else "cpu",
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda)",
)
@click.option(
"--show_sources",
"-s",
is_flag=True,
help="Show sources along with answers (Default is False)",
)
def main(device_type, show_sources):
"""
    This function implements the information retrieval task.
    1. Loads an embedding model, can be HuggingFaceInstructEmbeddings or HuggingFaceEmbeddings
    2. Loads the existing vectorstore that was created by ingest.py
    3. Loads the local LLM using the load_model function - You can now set different LLMs.
    4. Sets up the Question Answer retrieval chain.
    5. Answers questions interactively.
"""
log_to_english_prompt= """\
Use the following pieces of context to answer the question at the end.
The text enclosed in '<','>' are the log format that are faulty. The logs includes IP address, timestamp, request details, response code, referer URL, user agent, and additional information.
The text enclosed in '*','*' are the classifications of the log errors.
Your task is to identify and classify whether each log entry is valid or invalid according to the provided security incident types and their characteristics. Also give you the error or the classified error.
<198.51.100.10 - - [023:16:20:05 +0000] "POST /login/authenticate HTTP/1.1" 401 123 "https://www.example.com/login" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36" "-">
*This is an example of Repeated Failed Login Attempts*
<172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] "GET /api/admin/settings HTTP/1.1" 401 789 "https://www.example.com/admin" "MyCustomApp/1.0" "-">
*This is an example of unauthorized API access*
<172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] "GET /api/admin/settings HTTP/1.1" 401 789 "https://www.example.com/admin" "MyCustomApp/1.0" "-">
*This is an example of unauthorized API access*
<192.168.1.20 - - [22/Jan/2023:12:30:15 +0000] "GET /financial_reports/confidential_report.pdf HTTP/1.1" 403 12345 "https://www.something.com/restricted_area" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36" "-">
*This is an example of accessing Restricted Financial Data*
<192.168.1.20 - - [22/Jan/2023:12:30:15 +0000] "GET /financial/confidential_report.pdf HTTP/1.1" 403 12345 "https://www.youtube.com/restricted_area" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36" "-">
*This is an example of accessing Restricted Financial Data*
<66.249.66.91 - - [22/Jan/2019:03:56:20 +0330] "GET /filter/b874%2Cb32%2Cb63%2Cb99%2Cb126%2Cb820%2Cb249%2Cb3%2Cb148%2Cb724%2Cb613%2Cb183%2Cb213%2Cb484%2Cb224%2Cb734%2Cb20%2Cb95%2Cb542%2Cb212%2Cb485%2Cb523%2Cb221%2Cb118%2Cb186%2Cb67?page=<script>alert('Reflected XSS')</script> HTTP/1.1" 403 39660 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" "-">
*This is an example of Cross Site Scripting*
<2.177.12.140 - - [22/Jan/2019:03:56:25 +0330] "GET /static/images/amp/third-party/footer-mobile.png HTTP/1.1" 403 62894 "<script>alert('Reflected XSS')</script>" "Mozilla/5.0 (Android 7.1.1; Mobile; rv:64.0) Gecko/64.0 Firefox/64.0" "-">
*This is an example of Cross Site Scripting*
<31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "POST /change-password HTTP/1.1" 403 1530 "https://www.zanbil.ir/profile" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36" "-">
*"answer": "This is an example of cross site request forgery*
<2.179.141.98 - - [22/Jan/2019:03:56:45 +0330] "POST /change-profile-settings HTTP/1.1" 403 5409 "https://malicious-site.com/evil-page" "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" "-">
*This is an example of cross site request forgery*
<31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET /users/1/credit-card HTTP/1.1" 401 1530 "https://www.zanbil.ir/users/1" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36" "-">
*This is an example of Sensitive data exposure*
<5.211.97.39 - - [22/Jan/2019:03:56:57 +0330] "GET /view-file?file=../../../etc/shadow HTTP/1.1" 401 6934 "https://www.zanbil.ir/m/browse/meat-grinder/%DA%86%D8%B1%D8%AE-%DA%AF%D9%88%D8%B4%D8%AA" "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1" "-">
*This is an example of Sensitive data exposure*
<172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] "GET /api/admin/settings HTTP/1.1" 401 789 "https://www.example.com/admin" "MyCustomApp/1.0" "-">
*This is an example of unauthorized API access*
{context}
Later on, use the Policy Check tool to get the context and policy broken or violated.
Answer:
"""
# {answer} This is an example of File Inclusion Exploit
# 31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET /include?file=config.php HTTP/1.1" 404 5667 "https://www.zanbil.ir/include?file=config.php" "Mozilla/5.0 (Linux; Android 6.0; ALE-L21 Build/HuaweiALE-L21) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.158 Mobile Safari/537.36" "-"
# 66.111.54.249 - - [22/Jan/2019:03:56:45 +0330] "GET /view-file?file=../../../etc/passwd HTTP/1.1" 200 3744 "https://www.zanbil.ir/m/browse/refrigerator-and-freezer/%DB%8C%D8%AE%DA%86%D8%A7%D9%84-%D9%81%D8%B1%DB%8C%D8%B2%D8%B1" "Mozilla/5.0 (Linux; Android 5.0; SM-G900H Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.93 Mobile Safari/537.36" "-"
# {answer} This is an example of Distributed Denial of Service
# 31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET / HTTP/1.1" 503 5667 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36" "-"
# 5.211.97.39 - - [22/Jan/2019:03:56:58 +0330] "GET /image/attack-target HTTP/1.1" 404 0 "https://www.malicious-site.com/ddos-tool" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36" "-"
# 5.160.157.20 - - [22/Jan/2019:04:11:49 +0330] "GET /private/filter?f=p71&page=6 HTTP/1.1" 405 178 "-" "Mozilla/5.0 (Windows NT 5.1; rv:8.0) Gecko/20100101 Firefox/8.0" "-"
# {answer} This is an example of Session Hijacking
# 31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET /dashboard HTTP/1.1" 404 5667 "https://www.zanbil.ir/dashboard" "Mozilla/5.0 (Linux; Android 6.0; ALE-L21 Build/HuaweiALE-L21) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.158 Mobile Safari/537.36" "-"
# {answer} This is an example of log tampering
# 31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET /logs/access.log HTTP/1.1" 404 5667 "https://www.zanbil.ir/logs" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36" "-"
# 5.209.200.218 - - [22/Jan/2019:03:56:59 +0330] "GET /logs/access.log HTTP/1.1" 404 60795 "https://www.zanbil.ir/m/filter/b99%2Cp4510%2Cstexists%2Ct116" "Mozilla/5.0 (Linux; Android 5.1.1; SM-G361H Build/LMY48B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36" "-"
# {answer} This is an example of an unusual user agent string
# 31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] "GET / HTTP/1.1"
# 66.111.54.249 - - [22/Jan/2019:03:57:02 +0330] "GET /static/images/amp/third-party/footer-mobile.png HTTP/1.1" 200 62894 "https://www.zanbil.ir/m/browse/refrigerator-and-freezer/%DB%8C%D8%AE%DA%86%D8%A7%D9%84-%D9%81%D8%B1%DB%8C%D8%B2%D8%B1" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; Trident/7.0; AS; rv:11.0) like Gecko" "-"
# 66.249.66.194 - - [22/Jan/2019:04:11:41 +0330] "GET /filter/p10%2Cv1%7C%D8%B3%D8%A8%D8%B2%20%DA%A9%D8%B1%D9%85%2Cv1%7C%D9%85%D8%B4%DA%A9%DB%8C?productType=tea-maker HTTP/1.1" 200 32234 "-" "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)" "-"
# 54.36.148.55 - - [22/Jan/2019:04:11:42 +0330] "GET /filter/b114,b18 HTTP/1.1" 403 36164 "-" "Mozilla/5.0 (compatible; AhrefsBot/6.1; +http://ahrefs.com/robot/)" "-"
# """
# log_prompt = PromptTemplate(input_variables=["question", "answer"], template="Question: {question}\n{answer}")
# log_prompt = FewShotPromptTemplate(
# examples=log_to_english_prompt,
# example_prompt= log_prompt,
# suffix="Question: {input}",
# input_variables=["input"]
# )
logging.info(f"Running on: {device_type}")
logging.info(f"Display Source Documents set to: {show_sources}")
embeddings = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": device_type})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
    # the log vector store also needs the embedding function so queries can be embedded
    log_db = Chroma(persist_directory=PERSIST_LOG, embedding_function=embeddings, client_settings=CHROMA_SETTINGS_LOG)
# load the vectorstore
db = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=embeddings,
client_settings=CHROMA_SETTINGS,
)
retriever = db.as_retriever()
retriever_log = log_db.as_retriever()
# load the LLM for generating Natural Language responses
# for HF models
# model_id = "TheBloke/vicuna-7B-1.1-HF"
# model_basename = None
# model_id = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF"
# model_id = "TheBloke/guanaco-7B-HF"
# model_id = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM. Using STransformers
# alongside will 100% create OOM on 24GB cards.
# llm = load_model(device_type, model_id=model_id)
# for GPTQ (quantized) models
# model_id = "TheBloke/Nous-Hermes-13B-GPTQ"
# model_basename = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order"
# model_id = "TheBloke/WizardLM-30B-Uncensored-GPTQ"
# model_basename = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors" # Requires
# ~21GB VRAM. Using STransformers alongside can potentially create OOM on 24GB cards.
# model_id = "TheBloke/wizardLM-7B-GPTQ"
# model_basename = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
# model_id = "TheBloke/WizardLM-7B-uncensored-GPTQ"
# model_basename = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
# for GGML (quantized cpu+gpu+mps) models - check if they support llama.cpp
# model_id = "TheBloke/wizard-vicuna-13B-GGML"
# model_basename = "wizard-vicuna-13B.ggmlv3.q4_0.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q6_K.bin"
# model_basename = "wizard-vicuna-13B.ggmlv3.q2_K.bin"
# model_id = "TheBloke/orca_mini_3B-GGML"
# model_basename = "orca-mini-3b.ggmlv3.q4_0.bin"
# model_id = "TheBloke/Llama-2-7B-Chat-GGML"
# model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
# model_basename = "orel12/ggml-gpt4all-j-v1.3-groovy"
    # use the LangChain wrapper so the model implements the LLM interface expected by the chains below
    from langchain.llms import GPT4All
    model = GPT4All(model="orca-mini-3b.ggmlv3.q4_0.bin")
template = """\
Use the following pieces of context to answer the question at the end. If you don't know the answer,\
just say that you don't know, don't try to make up an answer.
Action Input: the input to the action. Enhance the query such that it can improve the performance of the model question answering model. Let's first understand the problem and devise a plan to solve the problem. Please output the plan starting with the header 'Plan:' and then followed by a numbered list of steps.to accurately complete the task. If the task is a question,the final step should almost always be 'Given the above steps taken,please respond to the users original question'.
Then. self reflect on your answer, find faults and revise.
Use tools for any context and knowledge base.
Analyze if it seems you would like to know more on the responses and if you would like to revisit any specific aspect
or have any further questions, please let revise.
Final Answer: the final answer to the original input question. Show the final answer or response to the user with '$answer....$' in this manner. so as to rectify that it is the final answer.
{context}
{history}
Question: {question}
Helpful Answer:"""
prompt_log = PromptTemplate(input_variables=["context"], template=log_to_english_prompt)
prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template)
memory = ConversationBufferMemory(input_key="question", memory_key="history")
# llm = load_model(device_type,model_id=model_id,model_basename=model_basename)
qa = RetrievalQA.from_chain_type(
llm=model,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt, "memory": memory},
)
qa_log = RetrievalQA.from_chain_type(
llm=model,
chain_type="stuff",
retriever=retriever_log,
return_source_documents=True,
chain_type_kwargs={"memory": memory},
)
tools = [
Tool(
name = "Policy Check ",
func=qa.run,
description="Use when the input is in english language and use for retreiving policies and rules of the company to answer the query"
),
Tool(
name="Log Check",
func=qa_log.run,
description='Use when the input is in the form <<5.123.174.57 - - [22/Jan/2019:04:04:39 +0330] "GET /image/20135?name=1020.jpg&wh=200x200 HTTP/1.1" 200 4908 "-" "Dalvik/2.1.0 (Linux; U; Android 8.1.0; SM-J710F Build/M1AJQ)" "-">> to form english meaning and to monitor any security breaches in the log ',
),
]
agent = initialize_agent(
tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True,prompt=prompt_log
)
# Interactive questions and answers
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
# Get the answer from the chain
        # agent.run returns a plain string (not a dict like RetrievalQA), so fetch sources separately
        answer = agent.run(input=query)
        docs = retriever.get_relevant_documents(query)
# Print the result
print("\n\n> Question:")
print(query)
print("\n> Answer:")
print(answer)
if show_sources: # this is a flag that you can set to disable showing answers.
# # Print the relevant sources used for the answer
print("----------------------------------SOURCE DOCUMENTS---------------------------")
for document in docs:
print("\n> " + document.metadata["source"] + ":")
print(document.page_content)
print("----------------------------------SOURCE DOCUMENTS---------------------------")
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
| [
"POST /change-profile-settings HTTP/1.1",
"$answer....$",
"GET /api/admin/settings HTTP/1.1",
"GET /filter/b874%2Cb32%2Cb63%2Cb99%2Cb126%2Cb820%2Cb249%2Cb3%2Cb148%2Cb724%2Cb613%2Cb183%2Cb213%2Cb484%2Cb224%2Cb734%2Cb20%2Cb95%2Cb542%2Cb212%2Cb485%2Cb523%2Cb221%2Cb118%2Cb186%2Cb67?page=<script>alert('Reflected XSS')</script> HTTP/1.1",
"t know the answer, just say that you don",
"https://www.something.com/restricted_area",
"Mozilla/5.0 (Android 7.1.1; Mobile; rv:64.0) Gecko/64.0 Firefox/64.0",
"answer",
"GET /view-file?file=../../../etc/shadow HTTP/1.1",
"POST /login/authenticate HTTP/1.1",
"https://www.example.com/login",
" Use the following pieces of context to answer the question at the end. \n The text enclosed in '<','>' are the log format that are faulty. The logs includes IP address, timestamp, request details, response code, referer URL, user agent, and additional information.\n The text enclosed in '*','*' are the classifications of the log errors. \n Your task is to identify and classify whether each log entry is valid or invalid according to the provided security incident types and their characteristics. Also give you the error or the classified error.\n\n <198.51.100.10 - - [023:16:20:05 +0000] \"POST /login/authenticate HTTP/1.1\" 401 123 \"https://www.example.com/login\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36\" \"-\">\n *This is an example of Repeated Failed Login Attempts*\n\n <172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] \"GET /api/admin/settings HTTP/1.1\" 401 789 \"https://www.example.com/admin\" \"MyCustomApp/1.0\" \"-\">\n *This is an example of unauthorized API access*\n \n <172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] \"GET /api/admin/settings HTTP/1.1\" 401 789 \"https://www.example.com/admin\" \"MyCustomApp/1.0\" \"-\">\n *This is an example of unauthorized API access*\n \n <192.168.1.20 - - [22/Jan/2023:12:30:15 +0000] \"GET /financial_reports/confidential_report.pdf HTTP/1.1\" 403 12345 \"https://www.something.com/restricted_area\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36\" \"-\">\n *This is an example of accessing Restricted Financial Data*\n \n <192.168.1.20 - - [22/Jan/2023:12:30:15 +0000] \"GET /financial/confidential_report.pdf HTTP/1.1\" 403 12345 \"https://www.youtube.com/restricted_area\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36\" \"-\">\n *This is an example of accessing Restricted Financial Data*\n \n <66.249.66.91 - - [22/Jan/2019:03:56:20 +0330] \"GET /filter/b874%2Cb32%2Cb63%2Cb99%2Cb126%2Cb820%2Cb249%2Cb3%2Cb148%2Cb724%2Cb613%2Cb183%2Cb213%2Cb484%2Cb224%2Cb734%2Cb20%2Cb95%2Cb542%2Cb212%2Cb485%2Cb523%2Cb221%2Cb118%2Cb186%2Cb67?page=<script>alert('Reflected XSS')</script> HTTP/1.1\" 403 39660 \"-\" \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\" \"-\">\n *This is an example of Cross Site Scripting*\n \n <2.177.12.140 - - [22/Jan/2019:03:56:25 +0330] \"GET /static/images/amp/third-party/footer-mobile.png HTTP/1.1\" 403 62894 \"<script>alert('Reflected XSS')</script>\" \"Mozilla/5.0 (Android 7.1.1; Mobile; rv:64.0) Gecko/64.0 Firefox/64.0\" \"-\">\n *This is an example of Cross Site Scripting*\n\n <31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] \"POST /change-password HTTP/1.1\" 403 1530 \"https://www.zanbil.ir/profile\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\" \"-\">\n *\"answer\": \"This is an example of cross site request forgery*\n \n <2.179.141.98 - - [22/Jan/2019:03:56:45 +0330] \"POST /change-profile-settings HTTP/1.1\" 403 5409 \"https://malicious-site.com/evil-page\" \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\" \"-\">\n *This is an example of cross site request forgery*\n \n <31.56.96.51 - - [22/Jan/2019:03:56:16 +0330] \"GET /users/1/credit-card HTTP/1.1\" 401 1530 \"https://www.zanbil.ir/users/1\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, 
like Gecko) Chrome/75.0.3770.142 Safari/537.36\" \"-\">\n *This is an example of Sensitive data exposure*\n \n <5.211.97.39 - - [22/Jan/2019:03:56:57 +0330] \"GET /view-file?file=../../../etc/shadow HTTP/1.1\" 401 6934 \"https://www.zanbil.ir/m/browse/meat-grinder/%DA%86%D8%B1%D8%AE-%DA%AF%D9%88%D8%B4%D8%AA\" \"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1\" \"-\">\n *This is an example of Sensitive data exposure*\n \n <172.16.0.15 - - [22/Jan/2023:18:10:30 +0000] \"GET /api/admin/settings HTTP/1.1\" 401 789 \"https://www.example.com/admin\" \"MyCustomApp/1.0\" \"-\">\n *This is an example of unauthorized API access*\n\n {context}\n Later on, use the Policy Check tool to get the context and policy broken or violated.\n\n Answer:\n ",
"question",
"POST /change-password HTTP/1.1",
"https://www.youtube.com/restricted_area",
"https://www.zanbil.ir/m/browse/meat-grinder/%DA%86%D8%B1%D8%AE-%DA%AF%D9%88%D8%B4%D8%AA",
"GET /users/1/credit-card HTTP/1.1",
" and then followed by a numbered list of steps.to accurately complete the task. If the task is a question,the final step should almost always be ",
"GET /static/images/amp/third-party/footer-mobile.png HTTP/1.1",
"https://www.example.com/admin",
"context",
"https://www.zanbil.ir/users/1",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
"<script>alert('Reflected XSS')</script>",
"GET /financial_reports/confidential_report.pdf HTTP/1.1",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.1234.567 Safari/537.36",
" Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n Action Input: the input to the action. Enhance the query such that it can improve the performance of the model question answering model. Let's first understand the problem and devise a plan to solve the problem. Please output the plan starting with the header 'Plan:' and then followed by a numbered list of steps.to accurately complete the task. If the task is a question,the final step should almost always be 'Given the above steps taken,please respond to the users original question'.\n Then. self reflect on your answer, find faults and revise.\n Use tools for any context and knowledge base. \n\n Analyze if it seems you would like to know more on the responses and if you would like to revisit any specific aspect\n or have any further questions, please let revise.\n Final Answer: the final answer to the original input question. Show the final answer or response to the user with '$answer....$' in this manner. so as to rectify that it is the final answer.\n\n {context}\n\n {history}\n Question: {question}\n Helpful Answer:",
"MyCustomApp/1.0",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"https://www.zanbil.ir/profile",
"https://malicious-site.com/evil-page",
"GET /financial/confidential_report.pdf HTTP/1.1",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
"s first understand the problem and devise a plan to solve the problem. Please output the plan starting with the header ",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1"
] |
2024-01-10 | MantisAI/prompt_engineering | prompts~models.py | from asyncio.log import logger
from functools import partial
import os
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
T5ForConditionalGeneration,
AutoTokenizer,
AutoConfig,
)
from accelerate import (
init_empty_weights,
load_checkpoint_and_dispatch,
infer_auto_device_map,
)
import requests
import openai
import torch
import gc
import cohere
import time
import re
MODEL_SETTINGS = {
"google/flan-t5-xxl": {
"checkpoint": "flan-t5-xxl",
"no_split_module_classes": ["T5Block"],
},
"EleutherAI/gpt-j-6B": {
"checkpoint": "sharded-gpt-j-6B",
"no_split_module_classes": ["GPTJBlock"],
},
"facebook/opt-30b": {
"checkpoint": "opt-30b",
"no_split_module_classes": ["OPTDecoderLayer"],
},
"facebook/opt-13b": {
"checkpoint": "opt-13b",
"no_split_module_classes": ["OPTDecoderLayer"],
},
"facebook/opt-125m": {
"checkpoint": "opt-125m",
"no_split_module_classes": ["OPTDecoderLayer"],
},
"t5-11b": {"checkpoint": "t5-11b", "no_split_module_classes": ["T5Block"]},
"ul2": {"checkpoint": "ul2", "no_split_module_classes": ["T5Block"]},
}
def gpt3(prompt, max_tokens=3, model_name="text-ada-001"):
openai.api_key = os.environ.get("OPENAI_API_KEY")
response = openai.Completion.create(
model=model_name,
prompt=prompt,
temperature=0.1,
max_tokens=max_tokens,
top_p=0.75,
frequency_penalty=0,
presence_penalty=0,
)
return response["choices"][0]["text"]
def hf_inference_api(prompt, model_name, max_tokens=3):
API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
query_input = {
"inputs": prompt,
}
if model_name not in ["bigscience/T0pp"]:
query_input["parameters"] = {
"max_new_tokens": max_tokens,
"return_full_text": False,
"top_p": 0.9,
"temperature": 0.1,
}
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_KEY')}"}
response = requests.post(API_URL, headers=headers, json=query_input)
output = response.json()
if type(output) != list:
logger.error("Bad response from HF.")
logger.error(output)
return False
output = output[0]["generated_text"]
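    # Strip the echoed prompt from the generated text: remove it directly if it is echoed verbatim,
    # otherwise cut everything up to and including the prompt's last word.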
if output.find(prompt) >= 0:
output = output.replace(prompt, "")
else:
output = output[len(prompt) - 30 :]
last_word = prompt[prompt.rfind(" ") + 1 :]
output = output[output.find(last_word) + len(last_word) :]
return output
def local_inference_api(prompt, max_tokens=3):
API_URL = f"http://localhost:8000/inference"
query_input = {"prompt": prompt, "max_length": max_tokens}
response = requests.post(API_URL, json=query_input)
output = response.json()
output = output["output"]
if output.find(prompt) >= 0:
output = output.replace(prompt, "")
else:
last_word = prompt[prompt.rfind(" ") + 1 :]
if output.find(last_word) >= 0:
output = output[len(prompt) - 30 :]
output = output[output.find(last_word) + len(last_word) :]
return output
def gooseai_inference_api(prompt, max_tokens=3):
API_URL = f"https://api.goose.ai/v1/engines/gpt-neo-20b/completions"
query_input = {
"prompt": prompt,
"max_tokens": max_tokens,
"top_p": 0.9,
"temperature": 0.1,
}
headers = {"Authorization": f"Bearer {os.environ.get('GOOSEAI_KEY')}"}
try:
response = requests.post(API_URL, headers=headers, json=query_input)
output = response.json()
output = output["choices"][0]["text"]
return output
except Exception:
return ""
def cohere_inference_api(prompt, max_tokens=3):
co = cohere.Client(os.environ.get("COHERE_API_KEY"))
try:
response = co.generate(
model="xlarge",
prompt=prompt,
max_tokens=max_tokens,
temperature=0.1,
k=0,
p=0.9,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=[],
return_likelihoods="NONE",
)
time.sleep(0.5)
except Exception:
return ""
return response.generations[0].text
def ai21_inference_api(prompt, max_tokens=3):
response = requests.post(
"https://api.ai21.com/studio/v1/j1-jumbo/complete",
headers={"Authorization": f"Bearer {os.environ.get('AI21_API_KEY')}"},
json={
"prompt": prompt,
"numResults": 1,
"maxTokens": max_tokens,
"temperature": 0.9,
"topKReturn": 0,
"topP": 0.9,
"stopSequences": [],
},
)
output = response.json()
output = output["completions"][0]["data"]["text"]
return output
def hf_inference_local(prompt, model_name, model, tokenizer, max_length=3):
encoded = tokenizer(prompt, return_tensors="pt")
input_ids = encoded.input_ids.cuda()
if model_name not in ["t5-11b", "ul2"]:
max = encoded["attention_mask"].shape[1] + max_length
else:
max = max_length
outputs = model.generate(input_ids, max_length=max)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
torch.cuda.empty_cache()
gc.collect()
output = output.replace(prompt, "")
return output
def load_model(model_name, api=True):
if model_name.startswith("text"):
return partial(gpt3, model_name=model_name)
if api:
if (
model_name.find("opt-") >= 0
or model_name == "ul2"
or model_name == "EleutherAI/gpt-j-6B"
or model_name == "google/flan-t5-xxl"
):
return partial(local_inference_api)
if model_name == "gpt-neox-20b":
return partial(gooseai_inference_api)
if model_name == "co:here":
return partial(cohere_inference_api)
if model_name == "jurassic1-jumbo":
return partial(ai21_inference_api)
return partial(hf_inference_api, model_name=model_name)
config = AutoConfig.from_pretrained(model_name)
with init_empty_weights():
if "pt" in model_name:
model = AutoModelForCausalLM.from_config(config)
else:
model = AutoModelForSeq2SeqLM.from_config(config)
settings = MODEL_SETTINGS[model_name]
if "pt" in model_name:
if model_name.find("opt") >= 0:
device_map = infer_auto_device_map(
model.model,
no_split_module_classes=settings["no_split_module_classes"],
dtype="float16",
)
if model_name == "facebook/opt-30b":
# device_map["decoder.embed_tokens.weight"] = 0
device_map["decoder.layers.14"] = 3
device_map["decoder.layers.15"] = 3
device_map["decoder.layers.33"] = 3
device_map["decoder.layers.32"] = 3
device_map["decoder.layers.46"] = 3
device_map["decoder.layers.47"] = 3
if model_name == "facebook/opt-13b":
device_map["decoder.layers.29"] = 2
device_map["decoder.layers.30"] = 2
device_map["decoder.layers.31"] = 2
device_map["decoder.layers.32"] = 2
device_map["decoder.layers.33"] = 2
device_map["decoder.layers.34"] = 2
device_map["decoder.layers.34.self_attn"] = 2
device_map["decoder.layers.34.activation_fn"] = 2
device_map["decoder.layers.34.fc1"] = 2
device_map["decoder.layers.34.fc2"] = 2
device_map["decoder.layers.34.self_attn_layer_norm"] = 2
device_map["decoder.layers.34.final_layer_norm"] = 2
model.model = load_checkpoint_and_dispatch(
model.model,
settings["checkpoint"],
device_map=device_map,
no_split_module_classes=settings["no_split_module_classes"],
offload_folder="models_offload",
offload_state_dict=True,
)
model.lm_head = load_checkpoint_and_dispatch(
model.lm_head, settings["checkpoint"]
)
model.tie_weights()
else:
device_map = {
"transformer.wte": 0,
"transformer.drop": 0,
"transformer.h.0": 0,
"transformer.h.1": 0,
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 0,
"transformer.h.10": 0,
"transformer.h.11": 0,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 0,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 0,
"transformer.h.18": 0,
"transformer.h.19": 0,
"transformer.h.20": 0,
"transformer.h.21": 0,
"transformer.h.22": 1,
"transformer.h.23": 1,
"transformer.h.24": 1,
"transformer.h.25": 1,
"transformer.h.26": 1,
"transformer.h.27": 1,
"transformer.ln_f": 1,
"lm_head": 1,
}
model = AutoModelForCausalLM.from_pretrained(
model_name,
device_map=device_map,
offload_folder="models_offload2",
offload_state_dict=True,
)
elif model_name == "google/flan-t5-xxl":
model = T5ForConditionalGeneration.from_pretrained(
settings["checkpoint"],
low_cpu_mem_usage=True,
torch_dtype=torch.bfloat16,
max_memory={0: "18GiB", 1: "10GiB"},
device_map="auto",
)
model.tie_weights()
else:
device_map = "auto"
if model_name == "ul2":
print("USING UL2")
device_map = {
"shared": 0,
"lm_head": 0,
"encoder": 0,
"decoder.embed_tokens": 1,
"decoder.block.0": 1,
"decoder.block.1": 1,
"decoder.block.2": 1,
"decoder.block.3": 1,
"decoder.block.4": 1,
"decoder.block.5": 1,
"decoder.block.6": 1,
"decoder.block.7": 1,
"decoder.block.8": 1,
"decoder.block.9": 1,
"decoder.block.10": 1,
"decoder.block.11": 1,
"decoder.block.12": 1,
"decoder.block.13": 1,
"decoder.block.14": 1,
"decoder.block.15": 1,
"decoder.block.16": 1,
"decoder.block.17": 1,
"decoder.block.18": 1,
"decoder.block.19": 1,
"decoder.block.20": 1,
"decoder.block.21": 1,
"decoder.block.22": 1,
"decoder.block.23": 1,
"decoder.block.24": 1,
"decoder.block.25": 1,
"decoder.block.26": 2,
"decoder.block.27": 2,
"decoder.block.28": 2,
"decoder.block.29": 2,
"decoder.block.30": 2,
"decoder.block.31": 2,
"decoder.final_layer_norm": 2,
"decoder.dropout": 2,
}
model = T5ForConditionalGeneration.from_pretrained(
settings["checkpoint"],
low_cpu_mem_usage=True,
torch_dtype=torch.bfloat16,
device_map=device_map,
)
model.tie_weights()
if model_name == "facebook/opt-13b":
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
else:
tokenizer = AutoTokenizer.from_pretrained(model_name)
return partial(
hf_inference_local, model_name=model_name, model=model, tokenizer=tokenizer
)
| [] |
2024-01-10 | fpedd/ees-pees | backend~webotsgym~env~grid~action.py | from gym.spaces import Discrete
class WbtActGrid():
"""Map proposed fake environment moves to webots.
Mapping:
------------
0: Right -> Up (1)
1: Down -> Left (2)
2: Left -> Down (3)
3: Up -> Right (4)
Parameters:
----------
config : WbtConfig
action_space : gym.spaces
        action space from OpenAI Gym, used so that Stable Baselines can be applied;
        for grid actions the action space is Discrete(4) (see mapping)
direction_type : string
        only a dummy here because it is not needed for grid actions
type : string
action type has to be grid here in WbtActGrid class.
"""
def __init__(self, config=None):
"""Initialize WbtActGrid class."""
self.config = config
self.action_space = Discrete(4)
self.direction_type = "steering" # just a dummy
self.type = "grid"
def map(self, action):
"""Map fake environment action to webots.
Description:
------------
        We use a different mapping in the fake environment than in Webots,
        so this function converts fake-environment actions accordingly.
Parameter:
----------
action : integer
action of the fake environment
Return:
-------
integer
action mapped on webots environment
"""
return int(action + 1)
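# Illustrative example (not part of the original file): mapping all four fake-environment
# actions onto the Webots action ids described in the class docstring.
#   act = WbtActGrid()
#   [act.map(a) for a in range(4)]  # -> [1, 2, 3, 4]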
| [] |