date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | yitong241/LiteraLink | trash~textSplit.py | from langchain.text_splitter import CharacterTextSplitter
def split_text(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
| [] |
2024-01-10 | yitong241/LiteraLink | functions~qg_openai.py | import openai
import random
from datasets import load_dataset
API_KEY = "random_key"
openai.api_key = API_KEY
system_msgs = {
'easy': "You are a primary school teacher who designs easy reading comprehension questions \
for primary school students.",
'medium': "You are a middle school teacher who designs medium level reading comprehension questions \
for middle school students.",
'hard': "You are a university professor who designs difficult reading comprehension questions \
for college students."
}
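# Parse the raw model reply into question/answer pairs: it must contain exactly 2 * num_qns non-empty lines, alternating question and answer.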
def response_parser(response, num_qns):
qa_pairs = []
cleaned_response = [line.strip() for line in response.split('\n') if len(line.strip()) > 0]
if len(cleaned_response) != 2 * num_qns:
return None
for i in range(0, 2 * num_qns, 2):
q = cleaned_response[i]
a = cleaned_response[i + 1]
if ":" in q:
q = q.split(":")[1]
elif str(i / 2 + 1) + "." in q:
q = q.split(str(i / 2 + 1) + ".")[1]
if ":" in a:
a = a.split(":")[1]
elif str(i / 2 + 1) + "." in q:
a = a.split(str(i / 2 + 1) + ".")[1]
qa_pairs.append({
"question": q,
"answer": a
})
return qa_pairs
def generate_questions(context, num_qns=5, difficulty_level='easy'):
system_msg = system_msgs[difficulty_level]
user_msg = "Please read the following context and generate {cnt} different question-answer pairs in a list. \
Each element of your list should contain one question-answer pair with clear separation. \
Context: {context}".format(cnt=num_qns, context=context)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k",
messages=[{"role": "system", "content": system_msg},
{"role": "user", "content": user_msg}])
print(response['choices'][0]['message']['content'])
qa_pairs = response_parser(response['choices'][0]['message']['content'], num_qns)
return qa_pairs
if __name__ == '__main__':
train_dataset, val_dataset, test_dataset = load_dataset("kmfoda/booksum",
split=["train", "validation", "test"])
sample = random.choice(train_dataset)
qa_pairs = generate_questions(sample["chapter"])
print(qa_pairs)
| [] |
2024-01-10 | Joeya1ds/Discord-GPT3-Bot | GPTBot.py | import json
import asyncio
import openai
import discord
from discord import app_commands
import logger
with open('./config.json') as data:
config = json.load(data)
path = config['LOG_PATH']
openai.api_key = config['OPENAI_API_KEY']
class aclient(discord.Client):
def __init__(self):
super().__init__(intents=discord.Intents.all())
self.synced = False
async def on_ready(self):
await self.wait_until_ready()
await self.change_presence(activity=discord.Activity(
type=discord.ActivityType.listening,
name='your prompts!'
))
if not self.synced:
await tree.sync()
self.synced = True
print('Logged in as:')
print(f'{self.user.name}, {self.user.id}')
print('Created by Joeyy#4628. The most up-to-date code can be found on github: https://github.com/Joeya1ds/Discord-GPT3-Bot')
print('--------------------------------------------------------------------------------------------------------------------')
client = aclient()
tree = app_commands.CommandTree(client)
@tree.command(name='ask', description='Ask the AI bot a question!')
async def ask(interaction: discord.Interaction, prompt: str):
user_id = interaction.user.id
await interaction.response.defer()
# Moderation API flagging
moderate = openai.Moderation.create(input=prompt)
flagged = moderate['results'][0]['flagged']
# Functions for generating and sending bot responses to chat messages
if flagged:
logger.create_log_file(path, user_id)
logger.append_log(path, user_id, prompt)
logger.append_warning_log(path, user_id)
logger.append_newline_log(path, user_id)
await asyncio.sleep(3)
await interaction.followup.send('I cannot respond to what you have said, it has been flagged by the Moderation API.')
print(f'User with ID: {user_id} has had a prompt flagged by the Moderation API. Consider checking logs.')
return
openairesponse = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
max_tokens=200,
temperature=0.8,
top_p=0.8,
)
logger.create_log_file(path, user_id)
logger.append_log(path, user_id, prompt)
logger.append_token_log(path, user_id, openairesponse["usage"]["total_tokens"])
logger.append_newline_log(path, user_id)
await asyncio.sleep(3)
await interaction.followup.send(openairesponse['choices'][0]['text'])
client.run(config['DISCORD_BOT_TOKEN'])
| [] |
2024-01-10 | zbailey83/SCRaiPER | vision_scraper.py | from openai import OpenAI
import subprocess
import base64
import os
from dotenv import load_dotenv
load_dotenv()
model = OpenAI()
model.timeout = 30
def image_b64(image):
with open(image, "rb") as f:
return base64.b64encode(f.read()).decode()
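# Screenshot the given URL by shelling out to a Node script (screenshot.js), which is expected to write screenshot.jpg into the working directory.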
def url2screenshot(url):
print(f"Crawling {url}")
if os.path.exists("screenshot.jpg"):
os.remove("screenshot.jpg")
result = subprocess.run(
["node", "screenshot.js", url], capture_output=True, text=True
)
exitcode = result.returncode
output = result.stdout
if not os.path.exists("screenshot.jpg"):
print("ERROR")
return "Failed to scrape the website"
b64_image = image_b64("screenshot.jpg")
return b64_image
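# Ask the GPT-4 Vision model to extract the requested information from the base64-encoded screenshot.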
def visionExtract(b64_image, prompt):
response = model.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "system",
"content": "You a web scraper, your job is to extract information based on a screenshot of a website & user's instruction",
}
]
+ [
{
"role": "user",
"content": [
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{b64_image}",
},
{
"type": "text",
"text": prompt,
},
],
}
],
max_tokens=1024,
)
message = response.choices[0].message
message_text = message.content
if "ANSWER_NOT_FOUND" in message_text:
print("ERROR: Answer not found")
return (
"I was unable to find the answer on that website. Please pick another one"
)
else:
print(f"GPT: {message_text}")
return message_text
def visionCrawl(url, prompt):
b64_image = url2screenshot(url)
print("Image captured")
if b64_image == "Failed to scrape the website":
return "I was unable to crawl that site. Please pick a different one."
else:
return visionExtract(b64_image, prompt)
response = visionCrawl("https://relevanceai.com/pricing", "Extract the pricing info")
print(response)
| [
"[{'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}, {'type': 'text', 'text': PLACEHOLDER}]",
"You a web scraper, your job is to extract information based on a screenshot of a website & user's instruction"
] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~accountsChat.py | import streamlit as st
from pathlib import Path
# from langchain.llms.openai import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain.agents.agent_types import AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
st.set_page_config(page_title="accountsChat", page_icon="🦜")
st.title("🦜 accountsChat")
# # User inputs
# radio_opt = ["Use sample database - Chinook.db", "Connect to your SQL database"]
# selected_opt = st.sidebar.radio(label="Choose suitable option", options=radio_opt)
# if radio_opt.index(selected_opt) == 1:
# db_uri = st.sidebar.text_input(
# label="Database URI", placeholder="mysql://user:pass@hostname:port/db"
# )
# else:
# db_filepath = (Path(__file__).parent / "Chinook.db").absolute()
# db_uri = f"sqlite:////{db_filepath}"
import os
openai_api_key = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = openai_api_key
DB_PASSWORD = open('pass.txt', 'r').read().strip()
from dotenv import load_dotenv
load_dotenv()
host="ep-wispy-forest-393400.ap-southeast-1.aws.neon.tech"
port="5432"
database="accountsDB"
username="db_user"
password=DB_PASSWORD
db_uri = f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}"
# openai_api_key = st.sidebar.text_input(
# label="OpenAI API Key",
# type="password",
# )
# Check user inputs
if not db_uri:
st.info("Please enter database URI to connect to your database.")
st.stop()
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
# Setup agent
# llm = OpenAI(openai_api_key=openai_api_key, temperature=0, streaming=True)
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
@st.cache_resource(ttl="2h")
def configure_db(db_uri):
return SQLDatabase.from_uri(database_uri=db_uri)
db = configure_db(db_uri)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
Following are the unique values in some of the columns of the database. Search for these values in their corresponding columns to get the relevant information:
Unique values in column 'cost_category': ["Fixed Asset"
"Field Asset"
"Receipt"
"Operating Expense"
"Administrative Expense"
"None"]
Unique values in column 'cost_subcategory': ["Computers & Printers"
"Software & Subscriptions"
"Furniture & Fixtures"
"None"
"Pantry Supplies"
"Office Stationary"
"Travelling & Conveyance"
"Misc. Exp"
"ISP"
"City Group Accounts"
"Electrician - Tv Installation"
"Stata IT Limited"
"Sheba.xyz- digiGO"
"Salary (Op)"
"Advertising"
"Hasan & Brothers"
"CEO"
"KPI Bonus"
"Final Settlement"
"Software & Subscription Fees"
"Electric Equipment"
"IOU"
"Medicine"
"Training & Development"
"Sales"
"Bill Reimbursement"
"Lunch Allowance"
"Balance B/D"
"Deployment Equipments "
"Retail Partner"
"Electric Tools - Tv Installation"
"Office Decoration/Reconstruction"
"Entertainment (Ops)"
"Carrying Cost"
"Entertainment (Admin)"
"Festival Bonus"
"Office Refreshment"
"Office Equipment"
"Bkash"
"Router"]
Unique values in column 'holder_bearer_vendor_quantity_device_name': ["Electric Spare Tools"
"75"
"None"
"Salim"
"Rakibul"
"Shoikot"
"Morshed"
"Android Box Tx6"
"ISP"
"Tv Frame"
"25"
"Hasan & Brothers"
"Digi Jadoo Broadband Ltd"
"H & H Construction"
"Teamviewer"
"Tea Spices"
"Amzad"
"Vendor"
"100"
"Omran"
"Flash Net Enterprise"
"Grid Ventures Ltd"
"32 Tv"
"Aman"
"Retail Partner"
"Printer"
"Shahin"
"Umbrella"
"Masud"
"A/C Payable"
"Tea"
"Coffee"
"Staffs"
"Emon"
"Flat flexible cable"
"May"
"Working Capital"
"Eid-ul-fitre"
"Shamim"
"Rubab"
"SR"
"CEO"
"WC"
"SSD 256 GB"
"Accounts (AD-IQ)"
"Retail Partner's Payment"
"Condensed Milk"
"Electrician"
"Farib & Indec"
"Jun"
"Asif"
"Driver"
"Nut+Boltu"
"Sugar"
"Labib"
"April"
"Coffee Mate"
"Tonner Cartridge"
"Router"]
Unique values in column 'source': ["50K"
"SR"
"None"]
Following are some example questions and their corresponding queries:
Question: give me top 10 cash out in may?
Query: SELECT date, details, cash_out FROM ledger WHERE EXTRACT(MONTH FROM date) = 5 AND cash_out IS NOT NULL ORDER BY cash_out DESC LIMIT 10;
Observation: When ordering by a column in descending order, the top values will be the largest values in the column.
"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
# agent = create_sql_agent(
# llm=llm,
# toolkit=toolkit,
# verbose=True,
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# )
agent = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
# agent_executor_kwargs = {'return_intermediate_steps': True}
)
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state["messages"] = [{"role": "assistant", "content": "You are connected with accountsDB. Ask questions!"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
user_query = st.chat_input(placeholder="Ask me anything!")
if user_query:
st.session_state.messages.append({"role": "user", "content": user_query})
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container())
response = agent.run(user_query, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response) | [
"You are connected with accountsDB. Ask questions!"
] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~accountsDB.py | import os
import openai
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import streamlit as st
API_KEY = open('key.txt', 'r').read().strip()
DB_PASSWORD = open('pass.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = API_KEY
from dotenv import load_dotenv
load_dotenv()
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
Following are the unique values in some of the columns of the database. Search for these values in their corresponding columns to get the relevant information:
Unique values in column 'cost_category': ["Fixed Asset"
"Field Asset"
"Receipt"
"Operating Expense"
"Administrative Expense"
"None"]
Unique values in column 'cost_subcategory': ["Computers & Printers"
"Software & Subscriptions"
"Furniture & Fixtures"
"None"
"Pantry Supplies"
"Office Stationary"
"Travelling & Conveyance"
"Misc. Exp"
"ISP"
"City Group Accounts"
"Electrician - Tv Installation"
"Stata IT Limited"
"Sheba.xyz- digiGO"
"Salary (Op)"
"Advertising"
"Hasan & Brothers"
"CEO"
"KPI Bonus"
"Final Settlement"
"Software & Subscription Fees"
"Electric Equipment"
"IOU"
"Medicine"
"Training & Development"
"Sales"
"Bill Reimbursement"
"Lunch Allowance"
"Balance B/D"
"Deployment Equipments "
"Retail Partner"
"Electric Tools - Tv Installation"
"Office Decoration/Reconstruction"
"Entertainment (Ops)"
"Carrying Cost"
"Entertainment (Admin)"
"Festival Bonus"
"Office Refreshment"
"Office Equipment"
"Bkash"
"Router"]
Unique values in column 'holder_bearer_vendor_quantity_device_name': ["Electric Spare Tools"
"75"
"None"
"Salim"
"Rakibul"
"Shoikot"
"Morshed"
"Android Box Tx6"
"ISP"
"Tv Frame"
"25"
"Hasan & Brothers"
"Digi Jadoo Broadband Ltd"
"H & H Construction"
"Teamviewer"
"Tea Spices"
"Amzad"
"Vendor"
"100"
"Omran"
"Flash Net Enterprise"
"Grid Ventures Ltd"
"32 Tv"
"Aman"
"Retail Partner"
"Printer"
"Shahin"
"Umbrella"
"Masud"
"A/C Payable"
"Tea"
"Coffee"
"Staffs"
"Emon"
"Flat flexible cable"
"May"
"Working Capital"
"Eid-ul-fitre"
"Shamim"
"Rubab"
"SR"
"CEO"
"WC"
"SSD 256 GB"
"Accounts (AD-IQ)"
"Retail Partner's Payment"
"Condensed Milk"
"Electrician"
"Farib & Indec"
"Jun"
"Asif"
"Driver"
"Nut+Boltu"
"Sugar"
"Labib"
"April"
"Coffee Mate"
"Tonner Cartridge"
"Router"]
Unique values in column 'source': ["50K"
"SR"
"None"]
Following are some example questions and their corresponding queries:
Question: give me top 10 cash out in may?
Query: SELECT date, details, cash_out FROM ledger WHERE EXTRACT(MONTH FROM date) = 5 AND cash_out IS NOT NULL ORDER BY cash_out DESC LIMIT 10;
Observation: When ordering by a column in descending order, the top values will be the largest values in the column.
"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
def get_response(input_text):
response = agent_executor(input_text)
# print(response['intermediate_steps'][1][0].tool)
# print(response['intermediate_steps'][-1][0].tool)
# print(response['output'])
if response['intermediate_steps'][1][0].tool == 'sql_db_schema':
schema = response['intermediate_steps'][1][1]
else: schema = None
if response['intermediate_steps'][-1][0].tool == 'sql_db_query':
query = response['intermediate_steps'][-1][0].tool_input
query_output = response['intermediate_steps'][-1][1]
else: query, query_output = None, None
answer = response['output']
return schema, query, query_output, answer
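# Ask gpt-3.5-turbo for a brief plain-language explanation of the executed SQL query, given the table schema and the query result.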
def explain(query, schema, query_output):
message_history = [{"role": "user", "content": f"""You are a SQL query explainer bot. That means you will explain the logic of a SQL query.
There is a postgreSQL database table with the following table:
{schema}
A SQL query is executed on the table and it returns the following result:
{query_output}
I will give you the SQL query executed to get the result and you will explain the logic executed in the query.
Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.
No need to explain the total query. Just explain the logic of the query.
Reply only with the explaination to further input. If you understand, say OK."""},
{"role": "assistant", "content": f"OK"}]
message_history.append({"role": "user", "content": query})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_history,
)
explaination = completion.choices[0].message.content
return explaination
host="ep-wispy-forest-393400.ap-southeast-1.aws.neon.tech"
port="5432"
database="accountsDB"
username="db_user"
password=DB_PASSWORD
# # Create the sidebar for DB connection parameters
# st.sidebar.header("Connect Your Database")
# host = st.sidebar.text_input("Host", value=host)
# port = st.sidebar.text_input("Port", value=port)
# username = st.sidebar.text_input("Username", value=username)
# password = st.sidebar.text_input("Password", value=password)
# database = st.sidebar.text_input("Database", value=database)
# # submit_button = st.sidebar.checkbox("Connect")
db = SQLDatabase.from_uri(f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}")
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
agent_executor_kwargs = {'return_intermediate_steps': True}
)
# Create the main panel
st.title("connectDB :star2:")
st.subheader("You are connected to AD-IQ Accounts database!!")
st.caption("The database contains the Daily Cash Input Output data for AD-IQ Accounts from Jan to June")
with st.expander("Database properties"):
# st.divider()
# st.write("*--Helpful Info--*")
st.subheader("Cost categories:")
st.text("""
"Fixed Asset"
"Field Asset"
"Receipt"
"Operating Expense"
"Administrative Expense"
"None"
""")
st.subheader("Cost Subcategories:")
st.text("""
"Computers & Printers"
"Software & Subscriptions"
"Furniture & Fixtures"
"None"
"Pantry Supplies"
"Office Stationary"
"Travelling & Conveyance"
"Misc. Exp"
"ISP"
"City Group Accounts"
"Electrician - Tv Installation"
"Stata IT Limited"
"Sheba.xyz- digiGO"
"Salary (Op)"
"Advertising"
"Hasan & Brothers"
"CEO"
"KPI Bonus"
"Final Settlement"
"Software & Subscription Fees"
"Electric Equipment"
"IOU"
"Medicine"
"Training & Development"
"Sales"
"Bill Reimbursement"
"Lunch Allowance"
"Balance B/D"
"Deployment Equipments "
"Retail Partner"
"Electric Tools - Tv Installation"
"Office Decoration/Reconstruction"
"Entertainment (Ops)"
"Carrying Cost"
"Entertainment (Admin)"
"Festival Bonus"
"Office Refreshment"
"Office Equipment"
"Bkash"
"Router"
""")
st.subheader("List of Holder/Bearer/Vendor:")
st.text("""
"Electric Spare Tools"
"75"
"None"
"Salim"
"Rakibul"
"Shoikot"
"Morshed"
"Android Box Tx6"
"ISP"
"Tv Frame"
"25"
"Hasan & Brothers"
"Digi Jadoo Broadband Ltd"
"H & H Construction"
"Teamviewer"
"Tea Spices"
"Amzad"
"Vendor"
"100"
"Omran"
"Flash Net Enterprise"
"Grid Ventures Ltd"
"32 Tv"
"Aman"
"Retail Partner"
"Printer"
"Shahin"
"Umbrella"
"Masud"
"A/C Payable"
"Tea"
"Coffee"
"Staffs"
"Emon"
"Flat flexible cable"
"May"
"Working Capital"
"Eid-ul-fitre"
"Shamim"
"Rubab"
"SR"
"CEO"
"WC"
"SSD 256 GB"
"Accounts (AD-IQ)"
"Retail Partner's Payment"
"Condensed Milk"
"Electrician"
"Farib & Indec"
"Jun"
"Asif"
"Driver"
"Nut+Boltu"
"Sugar"
"Labib"
"April"
"Coffee Mate"
"Tonner Cartridge"
"Router"
""")
# st.divider()
with st.expander("FAQs"):
st.text("""
1. Describe the database.
2. What is the timeline of the data present?
3. Who are the top 5 most expensive vendors?
4. What is the total amount of money spent on 'Electrician'?
5. How many different cost categories are there?
6. What is the total ISP cost in May?
7. Would you happen to have any information about the CEO?
8. Give me all expenses regarding Rubab?
9. Do we have any scope to reduce the expenses on operations?
""")
# Get the user's natural question input
question = st.text_input(":blue[Ask a question:]", placeholder="Enter your question.")
# Create a submit button for executing the query
query_button = st.button("Submit")
# Execute the query when the submit button is clicked
if query_button:
# Display the results as a dataframe
# Execute the query and get the results as a dataframe
try:
with st.spinner('Calculating...'):
print("\nQuestion: " + str(question))
# print(str(question))
schema, query, query_output, answer = get_response(question)
if query:
explaination = explain(query, schema, query_output)
else: explaination = None
# explaination = explain(query, schema, query_output)
# if query:
# print("\nExplaination: " + str(explaination))
print("\nExplaination: " + str(explaination))
st.subheader("Answer :robot_face:")
st.write(answer)
try:
if query:
st.divider()
# st.caption("Query:")
# st.caption(query)
st.caption("Explaination:")
st.caption(explaination)
st.divider()
except Exception as e:
print(e)
st.info(":coffee: _Did that answer your question? If not, try to be more specific._")
except:
st.warning(":wave: Please enter a valid question. Try to be as specific as possible.")
| [
"OK",
"You are a SQL query explainer bot. That means you will explain the logic of a SQL query. \n There is a postgreSQL database table with the following table:\n\n PLACEHOLDER \n \n A SQL query is executed on the table and it returns the following result:\n\n PLACEHOLDER\n\n I will give you the SQL query executed to get the result and you will explain the logic executed in the query.\n Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.\n No need to explain the total query. Just explain the logic of the query.\n Reply only with the explaination to further input. If you understand, say OK."
] |
2024-01-10 | kaiesalmahmud/DB-Connect | withPsycopg.py | import os
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import psycopg2
import pandas as pd
import streamlit as st
API_KEY = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = API_KEY
from dotenv import load_dotenv
load_dotenv()
host="localhost"
port="5432"
database="ReportDB"
user="postgres"
password="postgres"
db = SQLDatabase.from_uri(f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}")
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
# Implement: If null value is found at the top while trying to sort in descending order, try to look for the next non-null value.
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
SQL query format example:
Question: "Who are the top 5 retailers for the month of May in terms of total play time?"
Query: SELECT "Retail Name", SUM("Total Play time") as total_play_time
FROM "dailyLog"
WHERE EXTRACT(MONTH FROM "Date") = 5
GROUP BY "Retail Name"
ORDER BY total_play_time DESC
LIMIT 5
"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
# Define function to connect to the database using input parameters
def connect_to_database(host, port, username, password, database):
connection = psycopg2.connect(
host=host,
port=port,
database=database,
user=username,
password=password
)
return connection
def get_response(input_text):
response = agent_executor(input_text)
sql_query = response['intermediate_steps'][-1][0].tool_input
message = response['intermediate_steps'][-1][0].message_log[0].content
answer = response['output']
return sql_query, message, answer
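# Execute the generated SQL over the raw psycopg2 connection and render the result set as a Streamlit dataframe.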
def sqlout(connection,query):
cursor = connection.cursor()
cursor.execute(query)
# Given column information
column_info = list(cursor.description)
# Extract column names
column_names = [column.name for column in column_info]
# Create a DataFrame
df = pd.DataFrame(columns=range(len(column_names)))
# Fetch all the rows returned by the query
rows = cursor.fetchall()
df=df.from_records(rows).astype(str)
# Set column names as column headers
df.columns = column_names
# st.text(query)
st.dataframe(df,width=None)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
agent_executor_kwargs = {'return_intermediate_steps': True}
)
# Create the sidebar for DB connection parameters
st.sidebar.header("Connect Your Database")
host = st.sidebar.text_input("Host", value="localhost")
port = st.sidebar.text_input("Port", value="5432")
username = st.sidebar.text_input("Username", value="postgres")
password = st.sidebar.text_input("Password", value="postgres")
database = st.sidebar.text_input("Database", value="ReportDB")
submit_button = st.sidebar.checkbox("Connect")
# Create the main panel
st.title("DB Connect :cyclone:")
st.subheader("Connect your database and ask questions!!")
connection = None
# Check if the submit button is clicked and establish the database connection
if submit_button:
try:
connection = connect_to_database(host, port, username, password, database)
except:
connection=None
if connection:
st.sidebar.success("Connected to the database!")
else:
st.sidebar.error("Failed to connect to the database. Please check your connection parameters.")
st.divider()
st.write("*--Frequently Asked--*")
st.text("""
1. Describe the database.
2. What is the timeline of the data present?
3. What is the average total play time for the month of April?
4. Who are the top 5 retailers for the month of May in terms of total play time?
5. How many areas are the shops located at?
6. What is the combined total play time for 5th May?
7. List the top 5 areas with least average efficiency.
8. List the shops most frequently not opened in the month of April.
9. Which shops have the most days with playtime under 10 hours?
10. Which shops have the most days with a start time after 10 am?
""")
st.divider()
# Get the user's natural question input
question = st.text_input(":blue[Ask a question:]", placeholder="Enter your question")
# Create a submit button for executing the query
query_button = st.button("Submit")
# Execute the query when the submit button is clicked
if query_button:
if connection:
# Display the results as a dataframe
# Execute the query and get the results as a dataframe
try:
with st.spinner('Calculating...'):
sql_query, message, answer = get_response(question)
st.subheader("Answer :robot_face:")
st.write(answer)
# results_df = sqlout(connection, sql_query)
st.info(":coffee: _Did that answer your question? If not, try to be more specific._")
except:
st.warning(":wave: Please enter a valid question. Try to be as specific as possible.")
else:
st.warning(":wave: Please connect to the database first.")
| [] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~basicChat.py | from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.prompts import PromptTemplate
import streamlit as st
st.set_page_config(page_title="Basic Chat", page_icon="📖")
st.title("📖 Basic Chat")
# """
# A basic example of using StreamlitChatMessageHistory to help LLMChain remember messages in a conversation.
# The messages are stored in Session State across re-runs automatically. You can view the contents of Session State
# in the expander below. View the
# [source code for this app](https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/basic_memory.py).
# """
# Set up memory
msgs = StreamlitChatMessageHistory(key="langchain_messages")
memory = ConversationBufferMemory(chat_memory=msgs)
if len(msgs.messages) == 0:
msgs.add_ai_message("How can I help you?")
view_messages = st.expander("View the message contents in session state")
# # Get an OpenAI API Key before continuing
# if "openai_api_key" in st.secrets:
# openai_api_key = st.secrets.openai_api_key
# else:
# openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
import os
openai_api_key = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = openai_api_key
from dotenv import load_dotenv
load_dotenv()
if not openai_api_key:
st.info("Enter an OpenAI API Key to continue")
st.stop()
# Set up the LLMChain, passing in memory
template = """You are an AI chatbot having a conversation with a human.
{history}
Human: {human_input}
AI: """
prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
llm_chain = LLMChain(llm=OpenAI(openai_api_key=openai_api_key), prompt=prompt, memory=memory)
# Render current messages from StreamlitChatMessageHistory
for msg in msgs.messages:
st.chat_message(msg.type).write(msg.content)
# If user inputs a new prompt, generate and draw a new response
if prompt := st.chat_input():
st.chat_message("human").write(prompt)
# Note: new messages are saved to history automatically by Langchain during run
response = llm_chain.run(prompt)
st.chat_message("ai").write(response)
# Draw the messages at the end, so newly generated ones show up immediately
with view_messages:
"""
Memory initialized with:
```python
msgs = StreamlitChatMessageHistory(key="langchain_messages")
memory = ConversationBufferMemory(chat_memory=msgs)
```
Contents of `st.session_state.langchain_messages`:
"""
view_messages.json(st.session_state.langchain_messages) | [
"You are an AI chatbot having a conversation with a human.\n\n{history}\nHuman: {human_input}\nAI: ",
"human_input"
] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~dailyLogDB.py | import os
import openai
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import streamlit as st
API_KEY = open('key.txt', 'r').read().strip()
DB_PASSWORD = open('pass.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = API_KEY
from dotenv import load_dotenv
load_dotenv()
# Implement: If null value is found at the top while trying to sort in descending order, try to look for the next non-null value.
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
SQL query format example:
Question: "Who are the top 5 retailers for the month of May in terms of total play time?"
Query: SELECT "Retail Name", SUM("Total Play time") as total_play_time
FROM "dailyLog"
WHERE EXTRACT(MONTH FROM "Date") = 5 AND total_play_time IS NOT NULL
GROUP BY "Retail Name"
ORDER BY total_play_time DESC
LIMIT 5
Observation: When ordering by a column in descending order, the top values will be the largest values in the column.
"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
def get_response(input_text):
response = agent_executor(input_text)
# print(response['intermediate_steps'][1][0].tool)
# print(response['intermediate_steps'][-1][0].tool)
# print(response['output'])
if response['intermediate_steps'][1][0].tool == 'sql_db_schema':
schema = response['intermediate_steps'][1][1]
else: schema = None
if response['intermediate_steps'][-1][0].tool == 'sql_db_query':
query = response['intermediate_steps'][-1][0].tool_input
query_output = response['intermediate_steps'][-1][1]
else: query, query_output = None, None
answer = response['output']
return schema, query, query_output, answer
def explain(query, schema, query_output):
message_history = [{"role": "user", "content": f"""You are a SQL query explainer bot. That means you will explain the logic of a SQL query.
There is a postgreSQL database table with the following table:
{schema}
A SQL query is executed on the table and it returns the following result:
{query_output}
I will give you the SQL query executed to get the result and you will explain the logic executed in the query.
Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.
No need to explain the total query. Just explain the logic of the query.
Reply only with the explaination to further input. If you understand, say OK."""},
{"role": "assistant", "content": f"OK"}]
message_history.append({"role": "user", "content": query})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_history,
)
explaination = completion.choices[0].message.content
return explaination
host="localhost"
port="5432"
database="ReportDB"
username="postgres"
password="postgres"
# # Create the sidebar for DB connection parameters
# st.sidebar.header("Connect Your Database")
# host = st.sidebar.text_input("Host", value=host)
# port = st.sidebar.text_input("Port", value=port)
# username = st.sidebar.text_input("Username", value=username)
# password = st.sidebar.text_input("Password", value=password)
# database = st.sidebar.text_input("Database", value=database)
# # submit_button = st.sidebar.checkbox("Connect")
db = SQLDatabase.from_uri(f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}")
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
agent_executor_kwargs = {'return_intermediate_steps': True}
)
# Create the main panel
st.title("connectDB :star2:")
st.subheader("You are connected to AD-IQ DailyLog database!!")
st.caption("The database contains the Daily Log data for AD-IQ screens from April 1st to June 17th")
with st.expander("Database properties"):
st.text("""
"Date"
"Retail Name"
"Zone"
"Location"
"Total Play time"
"Efficiency (percent)"
"Start Time"
"End Time"
"Device Offline time"
"Remarks"
""")
with st.expander("FAQs"):
st.text("""
1. Describe the database.
2. What is the timeline of the data present?
3. What is the average total play time for the month of April?
4. Who are the top 5 retailers for the month of May in terms of total play time?
5. How many areas are the shops located at?
6. What is the combined total play time for 5th May?
7. List the top 5 areas with least average efficiency.
8. List the shops most frequently not opened in the month of April.
9. Which shops have the most days with playtime under 10 hours?
10. Which shops have the most days with a start time after 10 am?
""")
# Get the user's natural question input
question = st.text_input(":blue[Ask a question:]", placeholder="Enter your question.")
# Create a submit button for executing the query
query_button = st.button("Submit")
# Execute the query when the submit button is clicked
if query_button:
# Display the results as a dataframe
# Execute the query and get the results as a dataframe
try:
with st.spinner('Calculating...'):
print("\nQuestion: " + str(question))
# print(str(question))
schema, query, query_output, answer = get_response(question)
if query:
explaination = explain(query, schema, query_output)
else: explaination = None
# explaination = explain(query, schema, query_output)
# if query:
# print("\nExplaination: " + str(explaination))
print("\nExplaination: " + str(explaination))
st.subheader("Answer :robot_face:")
st.write(answer)
try:
if query:
st.divider()
# st.caption("Query:")
# st.caption(query)
st.caption("Explaination:")
st.caption(explaination)
st.divider()
except Exception as e:
print(e)
st.info(":coffee: _Did that answer your question? If not, try to be more specific._")
except Exception as e:
print(e)
st.warning(":wave: Please enter a valid question. Try to be as specific as possible.")
| [
"OK",
"You are a SQL query explainer bot. That means you will explain the logic of a SQL query. \n There is a postgreSQL database table with the following table:\n\n PLACEHOLDER \n \n A SQL query is executed on the table and it returns the following result:\n\n PLACEHOLDER\n\n I will give you the SQL query executed to get the result and you will explain the logic executed in the query.\n Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.\n No need to explain the total query. Just explain the logic of the query.\n Reply only with the explaination to further input. If you understand, say OK."
] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~anyDOC.py | import os
import openai
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
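# Embed the text chunks with OpenAI embeddings and index them in an in-memory FAISS vector store for retrieval.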
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings()
# embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
# llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
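# Send the user's question through the conversational retrieval chain and re-render the chat history, alternating user and bot templates.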
def handle_userinput(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
API_KEY = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = API_KEY
openai.api_key = API_KEY
from dotenv import load_dotenv
load_dotenv()
st.write(css, unsafe_allow_html=True)
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("anyDOC :bookmark_tabs:")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(
vectorstore)
user_question = st.text_input("Ask a question about your documents:")
if user_question:
handle_userinput(user_question) | [] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~anyDB.py | import os
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import streamlit as st
from io import StringIO
API_KEY = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = API_KEY
import openai
openai.api_key = API_KEY
from dotenv import load_dotenv
load_dotenv()
# SQL_PREFIX = """You are an agent designed to interact with a SQL database.
# Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
# Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
# You can order the results by a relevant column to return the most interesting examples in the database.
# Never query for all the columns from a specific table, only ask for the relevant columns given the question.
# You have access to tools for interacting with the database.
# Only use the below tools. Only use the information returned by the below tools to construct your final answer.
# You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
# DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
# If the question does not seem related to the database, just return "I don't know" as the answer.
# """
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
def get_response(input_text):
response = agent_executor(input_text)
# print(response['intermediate_steps'][1][0].tool)
# print(response['intermediate_steps'][-1][0].tool)
# print(response['output'])
if response['intermediate_steps'][1][0].tool == 'sql_db_schema':
schema = response['intermediate_steps'][1][1]
else: schema = None
if response['intermediate_steps'][-1][0].tool == 'sql_db_query':
query = response['intermediate_steps'][-1][0].tool_input
query_output = response['intermediate_steps'][-1][1]
else: query, query_output = None, None
answer = response['output']
return schema, query, query_output, answer
def explain(query, schema, query_output):
message_history = [{"role": "user", "content": f"""You are a SQL query explainer bot. That means you will explain the logic of a SQL query.
There is a postgreSQL database table with the following table:
{schema}
A SQL query is executed on the table and it returns the following result:
{query_output}
I will give you the SQL query executed to get the result and you will explain the logic executed in the query.
Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.
No need to explain the total query. Just explain the logic of the query.
Reply only with the explaination to further input. If you understand, say OK."""},
{"role": "assistant", "content": f"OK"}]
message_history.append({"role": "user", "content": query})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_history,
)
explaination = completion.choices[0].message.content
return explaination
# host="localhost"
# port="5432"
# database="ReportDB"
# user="postgres"
# password="postgres"
# host="rain.db.elephantsql.com"
# port="5432"
# database="wblrcksm"
# user="wblrcksm"
# password="gElzAF-zRYJO-DNtPUudJ7pV0C4E6qMv"
# Create the sidebar for DB connection parameters
# Create the main panel
st.title("anyDB :sparkles:")
st.subheader("Connect your database and ask questions!")
host="localhost"
port="5432"
database="ReportDB"
username="postgres"
password="postgres"
with st.form("Connect Your Database"):
# st.markdown("#### Connect Your Database")
host = st.text_input("Host", placeholder=host)
port = st.text_input("Port", placeholder=port)
username = st.text_input("Username", placeholder=username)
password = st.text_input("Password", placeholder=password)
database = st.text_input("Database", placeholder=database)
submit_button = st.checkbox("Connect")
description = ""
uploaded_file = st.file_uploader("Upload Database Documentation")
if uploaded_file is not None:
# To read file as bytes:
bytes_data = uploaded_file.getvalue()
# st.write(bytes_data)
# To convert to a string based IO:
description = StringIO(uploaded_file.getvalue().decode("utf-8")).read()
submit = st.form_submit_button("Process")
# print(description)
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
Here are some information about the database:
"""
SQL_PREFIX_update = SQL_PREFIX + "\n" + description
if submit_button:
db = SQLDatabase.from_uri(f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}")
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX_update,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
agent_executor_kwargs = {'return_intermediate_steps': True}
)
st.sidebar.success("Connected to the database!")
# Get the user's natural question input
question = st.text_input(":blue[Ask a question:]", placeholder="Enter your question.")
# Create a submit button for executing the query
query_button = st.button("Submit")
# Execute the query when the submit button is clicked
if query_button:
if not submit_button:
st.warning(":wave: Please connect to the database first.")
st.stop()
# Display the results as a dataframe
# Execute the query and get the results as a dataframe
try:
with st.spinner('Calculating...'):
print("\nQuestion: " + str(question))
# print(str(question))
schema, query, query_output, answer = get_response(question)
if query:
explaination = explain(query, schema, query_output)
else: explaination = None
# explaination = explain(query, schema, query_output)
# if query:
# print("\nExplaination: " + str(explaination))
print("\nExplaination: " + str(explaination))
st.subheader("Answer :robot_face:")
st.write(answer)
try:
if query:
st.divider()
# st.caption("Query:")
# st.caption(query)
st.caption("Explaination:")
st.caption(explaination)
st.divider()
except Exception as e:
print(e)
st.info(":coffee: _Did that answer your question? If not, try to be more specific._")
except Exception as e:
print(e)
st.warning(":wave: Please enter a valid question. Try to be as specific as possible.")
| [
"OK",
"You are a SQL query explainer bot. That means you will explain the logic of a SQL query. \n There is a postgreSQL database table with the following table:\n\n PLACEHOLDER \n \n A SQL query is executed on the table and it returns the following result:\n\n PLACEHOLDER\n\n I will give you the SQL query executed to get the result and you will explain the logic executed in the query.\n Make the explanation brief and simple. It will be used as the explanation of the results. Do not mention the query itself.\n No need to explain the total query. Just explain the logic of the query.\n Reply only with the explaination to further input. If you understand, say OK."
] |
2024-01-10 | kaiesalmahmud/DB-Connect | pages~dailyLogChat.py | import streamlit as st
from pathlib import Path
# from langchain.llms.openai import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain.agents.agent_types import AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
st.set_page_config(page_title="dailyLogChat", page_icon="🦜")
st.title("🦜 dailyLogChat")
# # User inputs
# radio_opt = ["Use sample database - Chinook.db", "Connect to your SQL database"]
# selected_opt = st.sidebar.radio(label="Choose suitable option", options=radio_opt)
# if radio_opt.index(selected_opt) == 1:
# db_uri = st.sidebar.text_input(
# label="Database URI", placeholder="mysql://user:pass@hostname:port/db"
# )
# else:
# db_filepath = (Path(__file__).parent / "Chinook.db").absolute()
# db_uri = f"sqlite:////{db_filepath}"
host="localhost"
port="5432"
database="ReportDB"
username="postgres"
password="postgres"
db_uri = f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}"
# openai_api_key = st.sidebar.text_input(
# label="OpenAI API Key",
# type="password",
# )
import os
openai_api_key = open('key.txt', 'r').read().strip()
os.environ["OPENAI_API_KEY"] = openai_api_key
from dotenv import load_dotenv
load_dotenv()
# Check user inputs
if not db_uri:
st.info("Please enter database URI to connect to your database.")
st.stop()
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
# Setup agent
# llm = OpenAI(openai_api_key=openai_api_key, temperature=0, streaming=True)
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
@st.cache_resource(ttl="2h")
def configure_db(db_uri):
return SQLDatabase.from_uri(database_uri=db_uri)
db = configure_db(db_uri)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
SQL_PREFIX = """You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.
Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the relevant columns given the question.
You have access to tools for interacting with the database.
Only use the below tools. Only use the information returned by the below tools to construct your final answer.
You MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
If the question does not seem related to the database, just return "I don't know" as the answer.
SQL query format example:
Question: "Who are the top 5 retailers for the month of May in terms of total play time?"
Query: SELECT "Retail Name", SUM("Total Play time") as total_play_time
FROM "dailyLog"
WHERE EXTRACT(MONTH FROM "Date") = 5 AND total_play_time IS NOT NULL
GROUP BY "Retail Name"
ORDER BY total_play_time DESC
LIMIT 5
Observation: When ordering by a column in descending order, the top values will be the largest values in the column.
"""
SQL_FUNCTIONS_SUFFIX = """I should look at the tables in the database to see what I can query. Then I should query the schema of the most relevant tables."""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
# agent = create_sql_agent(
# llm=llm,
# toolkit=toolkit,
# verbose=True,
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# )
agent = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
prefix=SQL_PREFIX,
suffix=SQL_FUNCTIONS_SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
# agent_executor_kwargs = {'return_intermediate_steps': True}
)
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state["messages"] = [{"role": "assistant", "content": "You are connected with dailyLogDB. Ask questions!"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
user_query = st.chat_input(placeholder="Ask me anything!")
if user_query:
st.session_state.messages.append({"role": "user", "content": user_query})
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container())
response = agent.run(user_query, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response) | [
"You are connected with dailyLogDB. Ask questions!"
] |
2024-01-10 | schulzad/rasa | aida~let_rasa_learn.py | from distutils.command.clean import clean
from urllib import response
import openai
import os
import sys
import json
import requests
import time
import random
# the follow is a python script that will let rasa learn from an api
# the demonstration api is at the url: https://beta.openai.com/docs/api-reference/
# the api key is: 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
#
# the completion api is at the url: https://api.openai.com/v1/engines/davinci/completions
openai_api_key = "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"
openai_api_url = "https://api.openai.com/v1/engines/davinci/completions"
os.environ['OPENAI_API_URL'] = openai_api_url
os.environ['OPENAI_API_KEY'] = openai_api_key
def get_api_key():
# get the api key from the environment variable
api_key = os.environ.get('OPENAI_API_KEY')
if api_key is None:
print("Please set the environment variable OPENAI_API_KEY")
sys.exit(1)
return api_key
def get_url():
url = os.environ.get('OPENAI_API_URL')
if url is None:
print("Please set the environment variable OPENAI_API_URL")
sys.exit(1)
return url
def discover_capabilities(url, api_key, npl_phrase):
# create the first intent from the api
# url = os.environ.get('OPENAI_API_URL')
# api_key = os.environ.get('OPENAI_API_KEY')
headers = {'Authorization': 'Bearer ' + api_key}
data = {
'api_key': api_key,
'engine': 'davinci',
'text': npl_phrase,
'max_tokens': 100
}
    response = requests.post(url, data=data, headers=headers)
if response.status_code != 200:
print("Failed to discover capabilities")
print(response.text)
sys.exit(1)
else:
print("Successfully discovered capabilities")
print(response.text)
print("Created intent")
return response.json()
def create_intent_from_phrase(phrase):
# create the first intent from the api
# from the template update the phrase
file = open("ask_openai_intent.txt", "r")
contents = file.read()
runtime_phrase = contents.replace("replace@runtime", phrase)
file.close()
if(runtime_phrase == phrase):
print("Error: could not replace the phrase")
sys.exit(1)
# print("NEW PHRASE:" + runtime_phrase)
# print("ORIGINAL PHRASE:" + phrase)
print("==intent_from_phrase==")
response = do_completion(runtime_phrase)
return response
def create_phrase_from_intent(intent):
# create the first intent from the api
print(intent)
print("replace the intent with " + str(intent))
file = open("ask_openai_phrase.txt", "r")
contents = file.read()
runtime_intent = contents.replace("replace@runtime", str(intent))
file.close()
if(runtime_intent == intent):
print("Error: could not replace the phrase")
sys.exit(1)
# print("DOING THE POST:" + runtime_intent)
# print("phrase:" + intent)
print("==phrase_from_intent==")
    response = do_completion(runtime_intent)
    return response
def create_phrase_from_phrase(phrase):
# create the first intent from the api
intent = create_intent_from_phrase(phrase)
new_phrase = create_phrase_from_intent(intent)
return new_phrase
def create_intent_from_intent(intent):
phrase = create_phrase_from_intent(intent)
new_intent = create_intent_from_phrase(phrase)
return new_intent
def string_to_json(string):
return json.loads(string)
def to_json(data):
return json.loads(data)
## REQUEST PARAMTERS
# {
# "prompt": "Say this is a test",
# "max_tokens": 5,
# "temperature": 1,
# "top_p": 1,
# "n": 1,
# "stream": false,
# "logprobs": null,
# "stop": "\n"
# }
## RESPONSE
# {
# "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
# "object": "text_completion",
# "created": 1589478378,
# "model": "text-davinci-002",
# "choices": [
# {
# "text": "\n\nThis is a test",
# "index": 0,
# "logprobs": null,
# "finish_reason": "length"
# }
# ]
# }
def do_completion(text):
url = get_url()
api_key = get_api_key()
#cleaned = text.join([chr(char) for char in range(1, 32)])
cleaned = text
headers = {'Authorization': 'Bearer ' + api_key}
headers.update({'Content-Type': 'application/json'})
data = {
'prompt': cleaned,
'max_tokens': 100
}
data = json.dumps(data)
response = requests.post(url, data=data, headers=headers)
if(response.status_code != 200):
print("FAILED to talk to SENSEI: " + str(response.status_code))
print(response.text)
print("REQUEST WAS: " + str(data))
sys.exit(1)
data = response.text
jsondata = json.loads(data)
#print("do_completion json data:")
#print("--returning from do_complete--")
#print(jsondata['choices'])
#sys.exit(1)
return jsondata['choices'][0]['text']
# def davinci_completion(text, max_tokens=40, temperature=0.7, n=3):
# params = {'text': text,
# 'max_tokens': max_tokens,
# 'temperature': temperature,
# 'top_p': 1.0,
# 'n': n}
# headers = {'Content-Type': 'application/json'}
# headers.update(('Authorization', 'Bearer ' + get_api_key()))
# r = requests.post('https://api.openai.com/v1/engines/davinci/completions', headers=headers, json=params)
# return r.json()
###
# This function takes in a phrase and returns a "better" phrase
# Better means that it is more likely to be a good phrase since we cycle through
# several iterations of the completion api
def dream_of_intents(intent, dream_value):
while(dream_value):
dream_value -= 1
intent = create_intent_from_intent(intent)
print("dream_of_intents...")
return intent
def dream_of_phrases(phrase, dream_value=3):
while(dream_value):
print("dream_of_phrases...")
dream_value -= 1
phrase = create_phrase_from_phrase(phrase)
return phrase
# discover and write out the list of intents to a file
def bootstrap_rasa():
gensis_phrase = "Hello A.I.. Can you tell me what your main capabilities are? What can you do and how should I communicate with you? A simple list is good"
better_phrase = create_intent_from_phrase(gensis_phrase)
print("BACK FROM CREATE")
#completed_text = complete_text(phrase)
#print(gensis_phrase)
#print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(better_phrase)
#intent = create_intent_from_phrase(url, api_key, "Hello A.I. Can you tell me what you main capabilities are?")
return
response = requests.post(url, data=data)
if response.status_code != 200:
print("Failed to dicover capabilities")
print(response.text)
sys.exit(1)
else:
print("Successfully discovered capabilities")
print(response.text)
print("Created intent")
return response.json()
if __name__ == "__main__":
bootstrap_rasa()
| [] |
2024-01-10 | MJavadHzr/CE40417-Artificial-Intelligence | MiniProjects~MPQ5~Practical~Q2~src~wrappers.py | from gym.core import ObservationWrapper, Wrapper, RewardWrapper
from gym.spaces import Box
import cv2
import numpy as np
# Some taken from OpenAI baselines.
class PreprocessAtariObs(ObservationWrapper):
def __init__(self, env, gray_scale=False):
"""A gym wrapper that crops, scales image into the desired shapes and grayscales it."""
ObservationWrapper.__init__(self, env)
self.img_size = (50, 50) # TODO: <YOUR CODE>
self.observation_space = Box(0.0, 1.0, (
self.img_size[0], self.img_size[1], 1 if gray_scale else env.observation_space.shape[2]))
self.gray_scale = gray_scale
def _to_gray_scale(self, rgb, channel_weights=[0.6, 0.3, 0.1]):
        # Weighted sum over the RGB channels, keeping a trailing channel axis.
        gray_img = np.dot(rgb[..., :3], np.array(channel_weights))[..., np.newaxis]
        return gray_img
def observation(self, img):
"""what happens to each observation"""
img = img[35:475, 37:465, :].astype('float32')
img = cv2.resize(img, (self.img_size[0], self.img_size[1]), interpolation = cv2.INTER_AREA)
if self.gray_scale:
img = self._to_gray_scale(img)
        # Scale pixel values into the (0, 1) range; the frame is already cropped,
        # resized and (optionally) grayscaled above.
        img = img / 255.0
        processed_img = img
        return processed_img
class ClipRewardEnv(RewardWrapper):
def __init__(self, env):
RewardWrapper.__init__(self, env)
def reward(self, reward):
# TODO: you may complete this section as you please
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class MaxAndSkipEnv(Wrapper):
# This wrapper holds the same action for <skip> frames and outputs
# the maximal pixel value of 2 last frames (to handle blinking
# in some envs)
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros(
(2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class FireResetEnv(Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
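# A minimal usage sketch (not part of the original assignment) showing how these
# wrappers are typically composed around an Atari environment. The env id, skip
# value and gray_scale flag here are assumptions, not requirements.
def make_preprocessed_env(env_id="BreakoutNoFrameskip-v4"):
    import gym
    env = gym.make(env_id)
    env = MaxAndSkipEnv(env, skip=4)        # hold each action for 4 frames, max-pool the last two
    env = EpisodicLifeEnv(env)              # treat loss of life as end of episode during training
    if "FIRE" in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)             # some games need FIRE pressed to start
    env = ClipRewardEnv(env)                # clip rewards to {-1, 0, +1}
    env = PreprocessAtariObs(env, gray_scale=True)  # crop, resize, grayscale, scale to (0, 1)
    return env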
| [] |
2024-01-10 | ta0ma0/neonirony | go_post.py | import openai
import time
import os
import random
import string
import sys
import json
import requests
def clear_screen():
    # Clear the terminal screen
os.system('cls' if os.name == 'nt' else 'clear')
def print_random_ascii(length=67):
    # ANSI escape codes for different colors
    colors = [
        "\033[91m",  # Red
        "\033[92m",  # Green
        "\033[93m",  # Yellow
        "\033[94m",  # Blue
        "\033[95m",  # Magenta
        "\033[96m",  # Cyan
        "\033[97m"   # White
    ]
    # Pick a random color
    random_color = random.choice(colors)
    # Generate a random string
    random_chars = ''.join(random.choices(string.ascii_letters + string.digits, k=length))
    # Print the string in the chosen color
    print(random_color + random_chars + "\033[0m")  # \033[0m resets formatting
def get_gpt_advice(api_key, prompt):
    # Get a completion from GPT
openai.api_key = api_key
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=800
)
return response.choices[0].text.strip()
def get_dalle_prompt(api_key, advice):
# Получение ответа от GPT
openai.api_key = api_key
preprompt = "Create a request for Dall-e 3 on English languge from the one that comes after [prompt], the generated image should have an illustration of what is in the main message, the image should be in cyberpunk style"
response = openai.Completion.create(
engine="text-davinci-003",
prompt=preprompt + "[prompt]" + advice,
max_tokens=256
)
dalle_prompt = response.choices[0].text.strip()
# print(dalle_prompt)
return dalle_prompt
def _write_to_file(advice, dirname='.'):
with open(f'{dirname}/advice.txt', 'w') as f:
f.write(advice + '\n')
def create_image(api_key, prompt):
headers = {
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json',
}
data = json.dumps({
"prompt": prompt,
"n": 1,
"size": "1024x1024"
})
response = requests.post('https://api.openai.com/v1/images/generations', headers=headers, data=data)
if response.status_code == 200:
return response.json()
else:
print("Something went wrong")
return None
def save_image(image_data, dirname):
if image_data:
image_url = image_data['data'][0]['url']
image_response = requests.get(image_url)
if image_response.status_code == 200:
with open(f'{dirname}/image.png', 'wb') as f:
f.write(image_response.content)
def main(api_key, prompt):
while True:
start_time = time.time()
clear_screen()
api_key = os.environ.get("OPENAI_API_KEY", None)
advice = get_gpt_advice(api_key, prompt)
print(advice)
_write_to_file(advice)
user_input = input('Continue? Y/n/p').strip().lower()
if user_input == 'n':
sys.exit()
elif user_input == 'y' or user_input == '':
# Продолжить выполнение программы
pass
elif user_input == 'p':
dirname = 'post_' + ''.join(random.choices(string.ascii_letters + string.digits, k=5))
os.makedirs(dirname, exist_ok=True)
dalle_prompt = get_dalle_prompt(api_key, advice)
image_data = create_image(api_key, dalle_prompt)
print(dalle_prompt)
save_image(image_data, dirname)
_write_to_file(advice + '\n\n' + dalle_prompt, dirname)
while True:
current_time = time.time()
            if current_time - start_time > 1700:  # stop once more than 1700 seconds have elapsed
break
            time.sleep(3)  # pause before checking the elapsed time again
print_random_ascii()
# Example usage:
api_key = os.environ.get("OPENAI_API_KEY", None)
#prompt = "Напиши историю в стиле киберпанк, в которой есть признаки этого жанра, используй места и имена из известных произведений киберпанка, добавь хакерского жаргона, где это уместно"
prompt = "Напиши короткую историю (400-500 символов) в стиле киберпанк, включающую элементы жанра, места и имена из известных произведений, а также хакерский жаргон. Сделай акцент на краткость и лаконичность."
main(api_key, prompt)
| [
"Напиши короткую историю (400-500 символов) в стиле киберпанк, включающую элементы жанра, места и имена из известных произведений, а также хакерский жаргон. Сделай акцент на краткость и лаконичность.",
"Create a request for Dall-e 3 on English languge from the one that comes after [prompt], the generated image should have an illustration of what is in the main message, the image should be in cyberpunk style",
"Create a request for Dall-e 3 on English languge from the one that comes after [prompt], the generated image should have an illustration of what is in the main message, the image should be in cyberpunk style[prompt]PLACEHOLDER"
] |
2024-01-10 | 1369556525/Streamlit_1 | Homepage.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
# Initialize the ChatOpenAI object
chat = None
if "OPENAI_API_KEY" not in st.session_state:
st.session_state["OPENAI_API_KEY"] = ""
elif st.session_state["OPENAI_API_KEY"] != "":
chat = ChatOpenAI(openai_api_key=st.session_state["OPENAI_API_KEY"])
if "PINECONE_API_KEY" not in st.session_state:
st.session_state["PINECONE_API_KEY"] = ""
if "PINECONE_ENVIRONMENT" not in st.session_state:
st.session_state["PINECONE_ENVIRONMENT"] = ""
st.set_page_config(page_title="Shareholders GPT", layout="wide")
st.title("🤠 Welcome to Shareholders GPT")
if "messages" not in st.session_state:
st.session_state["messages"] = []
if chat:
with st.container():
st.header("Chat with GPT")
for message in st.session_state["messages"]:
if isinstance(message, HumanMessage):
with st.chat_message("user"):
st.markdown(message.content)
elif isinstance(message, AIMessage):
with st.chat_message("assistant"):
st.markdown(message.content)
prompt = st.chat_input("Type something...")
if prompt:
st.session_state["messages"].append(HumanMessage(content=prompt))
with st.chat_message("user"):
st.markdown(prompt)
ai_message = chat([HumanMessage(content=prompt)])
st.session_state["messages"].append(ai_message)
with st.chat_message("assistant"):
st.markdown(ai_message.content)
else:
with st.container():
st.warning("Please set your OpenAI API key in the settings page.")
| [
"Type something..."
] |
2024-01-10 | codechrl/llm-data-explore | server~engine~kgraph.py | # import streamlit as st
import json
import openai
from langchain.chains import create_extraction_chain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import (
PromptTemplate,
)
from pydantic import BaseModel
from setting import setting
default_schema = {
"properties": {
"subject": {"type": "string"},
"relation": {"type": "string"},
"subjectRelated": {"type": "string"},
},
"required": ["subject", "relation", "subjectRelated"],
}
llm = ChatOpenAI(
temperature=0, model="gpt-3.5-turbo-16k", openai_api_key=setting.OPENAI_API_KEY
)
chain = create_extraction_chain(default_schema, llm, verbose=True)
# chain = create_extraction_chain_pydantic(pydantic_schema=DefaultSchema, llm=llm)
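# Hedged illustration (not part of the original module): for a toy sentence such
# as "ChatGPT is part of the LLM family", the extraction chain above is expected
# to return records shaped like default_schema, e.g.:
_example_extraction_rows = [
    {"subject": "ChatGPT", "relation": "part of", "subjectRelated": "LLM"},
]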
def generate(title, output_path="data/db"):
text_path = f"data/summary/{title}.txt"
with open(text_path, "r") as file:
text_data = file.read()
extraction_output = chain(text_data, include_run_info=True)
# markdown_output = json_to_markdown_table(extraction_output["text"])
# run_id = extraction_output["__run"].run_id
output = extraction_output["text"]
output_path = f"data/graph/{title}.json"
with open(output_path, "w") as file:
json.dump({"graph": output}, file)
class KnowledgeGraph(BaseModel):
subject: str = None
relation: str = None
subject_related: str = None
model_name = "text-davinci-003"
# model_name = "gpt-3.5-turbo-16k"
temperature = 0.0
model = OpenAI(
model_name=model_name,
temperature=temperature,
openai_api_key=setting.OPENAI_API_KEY,
# max_tokens=2000,
)
parser = PydanticOutputParser(pydantic_object=KnowledgeGraph)
prompt = PromptTemplate(
template="""You are expert in building Knowlede Graph.
Identify subjects and its relation.
Subject and subject related must a noun.
{format_instructions}\n{text}\n
Example answer without format:
subject : ChatGPT
relation : part
subject_related: LLM
""",
input_variables=["text"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
def generate_pydantic(title, output_path="data/db"):
text_path = f"data/summary/{title}.txt"
with open(text_path, "r") as file:
text_data = file.read()
_input = prompt.format_prompt(text=text_data)
output = model(_input.to_string())
extraction_output = parser.parse(output)
output = extraction_output
output_path = f"data/graph/{title}.json"
with open(output_path, "w") as file:
json.dump({"graph": output}, file)
def generate_pydantic_iter(title, output_path="data/db"):
print("graph")
text_path = f"data/summary/{title}_brief.txt"
with open(text_path, "r") as file:
text_data = file.read()
text_data_split = text_data.split("\n")
graph = []
for idx, _text_data_split in enumerate(text_data_split):
print(f"graph - {idx}")
_input = prompt.format_prompt(text=_text_data_split)
output = model(_input.to_string())
extraction_output = parser.parse(output)
graph.append(extraction_output)
output = extraction_output
output_path = f"data/graph/{title}.json"
with open(output_path, "w") as file:
try:
json.dump({"graph": output}, file)
except:
file.write(output)
def call_openai_api_graph(chunk):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "system",
"content": "Hello I am KnowledgeGraphBot. What can I help you?",
},
{
"role": "user",
"content": f""" Please identify all subject of the following text.
Then identify the relations.
Answer with this format of list of dict:
[{{
"subject": "ChatGPT,
"relation" "part",
"subject_related": "LLM
}},
{{
"subject": "Claude,
"relation" "part",
"subject_related": "LLM
}}]
Aanser ONLY with the list of dict. no explanation whatsoever.
TEXT: {chunk}
ANSWER:
""",
},
],
# max_tokens=15000,
n=1,
stop=None,
temperature=0.1,
)
return response.choices[0]["message"]["content"].strip()
def generate_call(title, output_path="data/db"):
text_path = f"data/summary/{title}_brief.txt"
with open(text_path, "r") as file:
text_data = file.read()
extraction_output = call_openai_api_graph(text_data)
output = extraction_output
try:
output = json.loads(output)
except:
pass
output_path = f"data/graph/{title}.json"
with open(output_path, "w") as file:
json.dump({"graph": output}, file)
| [
" Please identify all subject of the following text.\n Then identify the relations.\n Answer with this format of list of dict:\n [{\n \"subject\": \"ChatGPT,\n \"relation\" \"part\",\n \"subject_related\": \"LLM\n },\n {\n \"subject\": \"Claude,\n \"relation\" \"part\",\n \"subject_related\": \"LLM\n }]\n Aanser ONLY with the list of dict. no explanation whatsoever.\n\n TEXT: PLACEHOLDER\n ANSWER:\n ",
"format_instructions",
"You are expert in building Knowlede Graph. \n Identify subjects and its relation. \n Subject and subject related must a noun.\n \n {format_instructions}\n{text}\n\n \n Example answer without format:\n subject : ChatGPT\n relation : part\n subject_related: LLM\n ",
"Hello I am KnowledgeGraphBot. What can I help you?"
] |
2024-01-10 | codechrl/llm-data-explore | server~engine~kgraph2.py | # import streamlit as st
import json
from collections import Counter
from typing import List
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from pydantic import BaseModel
from setting import setting
class Node(BaseModel):
subject: str = None
relation: str = None
subject_related: str = None
class KnowledgeGraph(BaseModel):
node: List[Node]
model_name = "text-davinci-003"
# model_name = "gpt-3.5-turbo"
temperature = 0.0
model = OpenAI(
model_name=model_name,
temperature=temperature,
openai_api_key=setting.OPENAI_API_KEY,
# max_tokens=2000,
)
parser = PydanticOutputParser(pydantic_object=KnowledgeGraph)
prompt = PromptTemplate(
template="""You are expert in building Knowlede Graph.
Identify subjects and its relation.
Subject and subject related must a noun.
Subject and subject is ONE to ONE relation.
Answer only with the instuction below. No need explanation or anything not neccesary.
{format_instructions}\n{text}\n
""",
input_variables=["text"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
def generate_pydantic_iter(title, output_path="data/db"):
print("graph")
text_path = f"data/summary/{title}.txt"
with open(text_path, "r") as file:
text_data = file.read()
text_data_split = text_data.split("\n")
graph = []
for idx, _text_data_split in enumerate(text_data_split):
try:
print(f"graph - {idx}")
_input = prompt.format_prompt(text=_text_data_split)
output = model(_input.to_string())
extraction_output = parser.parse(output)
graph.append(extraction_output)
except Exception as exc:
print(f"Error: {exc}")
output = []
for graph_elem in graph:
output.extend(
[
{
"subject": node.subject,
"relation": node.relation,
"subject_related": node.subject_related,
}
for node in graph_elem.node
]
)
output_path = f"data/graph/{title}.json"
with open(output_path, "w") as file:
json.dump({"graph": output}, file)
def format(title):
with open(f"data/graph/{title}.json", "r") as json_file:
data = json.load(json_file)
subjects = [elem["subject"] for elem in data["graph"]]
objects = [elem["subject_related"] for elem in data["graph"]]
subjects_counts = Counter(subjects)
subjects_total_occurences = sum(subjects_counts.values())
subjects_counts_list = [
{"name": key, "occurrence": value} for key, value in subjects_counts.items()
]
for idx, elem in enumerate(subjects_counts_list):
subjects_counts_list[idx]["size_percentage"] = (
elem["occurrence"] / subjects_total_occurences
)
for idx, elem_subj in enumerate(subjects_counts_list):
objects = []
for idx_graph, elem_graph in enumerate(data["graph"]):
if elem_subj["name"] == elem_graph["subject"]:
objects.append(elem_graph["subject_related"])
subjects_counts_list[idx]["subject_related"] = objects
return subjects_counts_list
| [
"format_instructions",
"You are expert in building Knowlede Graph. \n Identify subjects and its relation. \n Subject and subject related must a noun.\n Subject and subject is ONE to ONE relation.\n Answer only with the instuction below. No need explanation or anything not neccesary.\n \n {format_instructions}\n{text}\n\n "
] |
2024-01-10 | codechrl/llm-data-explore | server~engine~vector.py | import json
import os
import pathlib
import pickle
import re
import subprocess
import tiktoken
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import ConversationalRetrievalChain, RetrievalQAWithSourcesChain
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
# from langchain.prompts import (
# ChatPromptTemplate,
# HumanMessagePromptTemplate,
# MessagesPlaceholder,
# )
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from setting import setting
os.environ["OPENAI_API_KEY"] = setting.OPENAI_API_KEY
REPO_URL = "https://github.com/GovTechSG/developer.gov.sg" # Source URL
DOCS_FOLDER = "data/repository" # Folder to check out to
REPO_DOCUMENTS_PATH = "" # Set to "" to index the whole data folder
DOCUMENT_BASE_URL = "https://www.developer.tech.gov.sg/products/categories/devops/ship-hats" # Actual URL
DATA_STORE_DIR = "data/data_store"
name_filter = "**/*.md"
name_filter = "**/*.*"
separator = "\n### "
separator = " " # Thi+s separator assumes Markdown docs from the repo uses ### as logical main header most of the time
name_filter = "**/*.*"
separator = "\n### "
separator = " " # Thi+s separator assumes Markdown docs from the repo uses ### as logical main header most of the time
chunk_size_limit = 1000
max_chunk_overlap = 20
CONV_LIST = []
def run_command_with_output(command):
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, text=True
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.strip())
return process.poll()
def convert_path_to_doc_url(doc_path):
# Convert from relative path to actual document url
return re.sub(
f"{DOCS_FOLDER}/{REPO_DOCUMENTS_PATH}/(.*)\.[\w\d]+",
f"{DOCUMENT_BASE_URL}/\\1",
str(doc_path),
)
def split_doc(title, folder="repository", only=None):
repo_path = pathlib.Path(os.path.join(f"data/{folder}", title))
# document_files = list(repo_path.glob(name_filter))
document_files = list(repo_path.glob(name_filter))
document_files = [p for p in document_files if os.path.isfile(p)]
document_files = [
p
for p in document_files
if str(p).split(".")[-1] not in ["jpg", "jpeg", "mp3", "mp4", "png", "webp"]
]
document_files = [p for p in document_files if "/.git/" not in str(p)]
if only:
document_files = [p for p in document_files if str(p).split(".")[-1] in only]
documents = []
for file in document_files:
try:
print(file)
documents.append(
Document(
page_content=open(file, "r").read(),
metadata={"source": convert_path_to_doc_url(file)},
)
)
except:
pass
text_splitter = CharacterTextSplitter(
separator=separator,
chunk_size=chunk_size_limit,
chunk_overlap=max_chunk_overlap,
)
split_docs = text_splitter.split_documents(documents)
enc = tiktoken.get_encoding("cl100k_base")
try:
enc = tiktoken.encoding_for_model("gpt-4")
except:
pass
total_word_count = sum(len(doc.page_content.split()) for doc in split_docs)
total_token_count = sum(len(enc.encode(doc.page_content)) for doc in split_docs)
print(f"\nTotal word count: {total_word_count}")
print(f"\nEstimated tokens: {total_token_count}")
print(f"\nEstimated cost of embedding: ${total_token_count * 0.0004 / 1000}")
return split_docs
def generate_embedding(title, split_docs):
embeddings = OpenAIEmbeddings()
vector_store = FAISS.from_documents(split_docs, embeddings)
vector_store.save_local(f"{DATA_STORE_DIR}/{title}")
def load_embedding(title):
if os.path.exists(f"{DATA_STORE_DIR}/{title}"):
vector_store = FAISS.load_local(f"{DATA_STORE_DIR}/{title}", OpenAIEmbeddings())
return vector_store
else:
print(
f"Missing files. Upload index.faiss and index.pkl files to {DATA_STORE_DIR}/{title} directory first"
)
def ask(vector_store, question, stream=False):
system_template = """Use the following pieces of context to answer the users question.
Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-16k", temperature=0, max_tokens=14000
) # Modify model_name if you have access to GPT-4
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs,
)
if not stream:
return chain(question)
else:
return chain.stream(question)
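# A minimal usage sketch (not part of the original module). It assumes a
# repository has already been checked out under data/repository/<title> and
# that OPENAI_API_KEY is set; the title and question below are placeholders.
def _example_build_and_query(title="developer.gov.sg", question="What is SHIP-HATS?"):
    docs = split_doc(title)            # chunk the checked-out documents
    generate_embedding(title, docs)    # build and persist the FAISS index
    store = load_embedding(title)      # reload the persisted index
    result = ask(store, question)      # answer with sources via the chain above
    print(result["answer"], result["sources"])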
def ask_conv(vector_store, question, conv_id, stream=False):
global CONV_LIST
# Construct a ConversationalRetrievalChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
    matches = [ch["chat_history"] for ch in CONV_LIST if ch["id"] == conv_id]
    if matches:
        chat_history = matches[0]
    else:
        chat_history = []
        CONV_LIST.append({"id": conv_id, "chat_history": chat_history})
llm = OpenAI(
temperature=0,
)
streaming_llm = OpenAI(
# streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
qa = ConversationalRetrievalChain(
retriever=vector_store.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=question_generator,
)
    result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
for idx in range(len(CONV_LIST)):
if CONV_LIST[idx]["id"] == conv_id:
CONV_LIST[idx]["chat_history"] = chat_history
return result
def ask_memory_(vector_store, question, session_id, stream=False):
try:
with open(f"data/session/{session_id}.json", "r") as json_file:
json.load(json_file)
except:
pass
system_template = """Use the following pieces of context to answer the users question.
Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
# MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0, max_tokens=14000)
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs,
# memory=memory,
)
try:
print(memory.to_json())
except:
pass
if not stream:
return chain(question)
else:
return chain.stream(question)
def ask_memory(vector_store, question, session_id=None, stream=False):
try:
with open(f"data/session/{session_id}.pickle", "rb") as file:
memory = pickle.load(file)
except:
memory = []
memory_template = (
"""Also use the following pieces of chat history to understand context."""
)
if memory:
for i_memory in memory:
memory_template += f"{i_memory[0]}: {i_memory[1]}"
system_template = """Use the following pieces of context to answer the users question.
Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources.
If you don't know the answer, just say that "I don't know", don't try to make up an answer.
----------------
{summaries}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template(memory_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0, max_tokens=14000)
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
return_source_documents=True,
chain_type_kwargs=chain_type_kwargs,
)
result = chain(question)
try:
memory.append(["Human", question])
memory.append(["Human", result["answer"]])
print(memory)
with open(f"data/session/{session_id}.pickle", "wb") as file:
pickle.dump(memory, file)
except Exception as exc:
print(exc)
return result
def repository_overview():
repo_done = []
directory_path = "data/db"
for filename in os.listdir(directory_path):
if filename.endswith(".json"):
file_path = os.path.join(directory_path, filename)
with open(file_path, "r") as file:
json_data = json.load(file)
if json_data.get("status") == "done":
repo_done.append(json_data)
print(repo_done)
model_embedding = load_embedding("overview_repository")
if model_embedding is None:
c = 0
while True:
try:
model_embedding = load_embedding(repo_done[c]["title"])
c += 1
break
except:
pass
for repo in repo_done:
try:
load_embedding(repo["title"])
print(f"embed {repo['title']}")
split_docs = split_doc(repo["title"], folder="raw", only=["txt"])
# embeddings = OpenAIEmbeddings()
# vector_store = FAISS.from_documents(split_docs, embeddings)
            # add_documents accepts the Document objects produced by split_doc
            model_embedding.add_documents(split_docs)
# model_embedding.a
# model_embedding.add(model_repo.as_retriever())
# model_embedding += model_repo
# model_embedding = FAISS.IndexIDMap(FAISS.IndexFlatIP(faiss_index1.index.d))
# model_embedding.add_with_ids(faiss_index1.index, np.arange(faiss_index1.index.ntotal))
# model_embedding.add_with_ids(faiss_index2.index, np.arange(faiss_index1.index.ntotal, faiss_index1.index.ntotal + faiss_index2.index.ntotal))
except:
pass
model_embedding.save_local(f"{DATA_STORE_DIR}/overview_repository")
| [
"PLACEHOLDER: PLACEHOLDER",
"{question}",
"Also use the following pieces of chat history to understand context.",
"Use the following pieces of context to answer the users question.\n Take note of the sources and include them in the answer in the format: \"SOURCES: source1 source2\", use \"SOURCES\" in capital letters regardless of the number of sources.\n If you don't know the answer, just say that \"I don't know\", don't try to make up an answer.\n \n ----------------\n {summaries}"
] |
2024-01-10 | IkeMane/Adogy-Publications | Autoblog.py | from openai import OpenAI
from dotenv import load_dotenv
import os
import json
load_dotenv()
Client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
model = os.getenv("OPENAI_MODEL")
def upload_file(file_path, purpose):
with open(file_path, "rb") as file:
response = Client.files.create(file=file, purpose=purpose)
return response.id
def methodology(keyword):
systemmsg = f"You are a methodology section generator for {keyword} ranking its items and or categories. Output only the text that will be used in article."
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f"Generate a methodology section in html starting at h2 for a wordpress article titled {keyword}. Include a heading for the section."
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
response_message = response.choices[0].message.content
# print(response_message)
return response_message
def introduction(article):
systemmsg = f"You are an introduction section generator for wordpress articles. You generate a very short introduction"
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f"Generate a one paragraph introduction without including the methodology for a wordpress article format it html starting at h2: \n {article}"
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
response_message = response.choices[0].message.content
# print("\n\nIntroduction:",response_message)
return response_message
def read_items(filename):
with open(filename, 'r') as file:
data = json.load(file)
return data['items']
#TODO change this to be an assistant API call to add internal links and to be sure it doesnt max out of tokens.
def generate_sections(methodology,keyword,items):
rated_items = f"<h2> {keyword} </h2>\n\n"
messages = list()
systemmsg = f"You are a section generator for wordpress articles. Write in a journalist tone and based off: \n {methodology}."
messages.append({"role": "system", "content": systemmsg})
for item in items:
name = item['Title']
link = item['URL']
photo = item['Image URL']
prompt = f"Generate a short one paragraph section in html about {name} for the article title {keyword}. Be sure to add their link whenever you mention their name: {link} and show the image if one: {photo}. Dont add any headers."
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(
model=model,
messages=messages,
)
response_message = response.choices[0].message.content
messages.append({"role": "assistant", "content": response_message})
# print(response_message)
rated_items += f"<h3>{name}</h3>\n{response_message}\n\n"
return rated_items
def overview(keyword, rated_items):
systemmsg = f"You are an article overview generator for wordpress articles. You generate the overview with this format: \n <h2>{keyword}</h2>: \n <ul> \n <li> <a href='https://www.wired.com/'>Wired</a> </li> \n </ul> "
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f"Generate an overview of this article with no images in html for the article titled {keyword}. Keep it one short sentence MAX for each section: {rated_items}."
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
response_message = response.choices[0].message.content
return response_message
def table_of_contents(article):
systemmsg = f"You are an table of contents generator for wordpress articles. You generate the table of contents with this format: <h2> Table of Contents </h2> \n <ul> \n <li> <a href='#introduction'>Introduction</a> </li> \n </ul>..."
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f"ONLY generate the table of contents for this article in html with links to headings, include a heading for the section: {article}."
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
response_message = response.choices[0].message.content
return response_message
def generate_json(keyword,methodology):
systemmsg = f"You are a json generator for {keyword} ranking its items and or categories. You use: {methodology} Output JSON."
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f'Create a list of {keyword} and links to websites. Leave the image URL and Image URL blank like this JSON: {{"items": [{{"Title": "TechCrunch", "URL": "", "Image URL": ""}},...]}} '
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model='gpt-4-1106-preview',messages=messages,response_format={ "type": "json_object" })
response_message = response.choices[0].message.content
# print(response_message)
return response_message
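# Hedged sketch (not part of the original flow): generate_json returns a JSON
# string of items; persisting it to data.json is one way to feed read_items()
# and autoblog() below. The file name matches the hard-coded path in read_items.
def _example_refresh_items(keyword, methodology_text):
    items_json = generate_json(keyword, methodology_text)
    with open("data.json", "w") as file:
        file.write(items_json)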
def autoblog(keyword,methodology_):
# methodology_ = methodology(keyword)
items = read_items('data.json')
sections = generate_sections(methodology_,keyword,items)
overview_ = overview(keyword,sections)
article = methodology_ + "\n\n"+ sections
introduction_ = introduction(article)
article += "\n\n"+ introduction_ +"\n\n" + overview_ + "\n\n" + methodology_ +"\n\n"+ sections
table_of_contents_ = table_of_contents(article)
final_article = introduction_ +"\n\n"+ table_of_contents_ +"\n\n"+ overview_ + "\n\n" + methodology_ +"\n\n\n"+ sections
# print(final_article)
#replace markdown tags with nothing
final_article = final_article.replace("“`html","")
final_article = final_article.replace("“`","")
final_article = final_article.replace("```html","")
final_article = final_article.replace("```","")
final_article = final_article.replace('"','')
#add results to results.md file
with open('results.md', 'w') as file:
file.write(final_article)
return final_article
def seo(article):
systemmsg = "You are an SEO generator for wordpress articles. You return only the text that will be used. e.g. response: Top Tech Publications"
messages = list()
messages.append({"role": "system", "content": systemmsg})
prompt = f"Heres the article:\n {article}."
messages.append({"role": "user", "content": prompt})
prompt = f"Generate the Focus keyphrase for this article."
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
focus_keyphrase = response.choices[0].message.content
focus_keyphrase = focus_keyphrase.replace('"','')
messages.append({"role": "assistant", "content": focus_keyphrase})
prompt = f"Generate the title for this article"
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
title = response.choices[0].message.content
title = title.replace('"','')
messages.append({"role": "assistant", "content": title})
prompt = f"Generate the SEO title for this article"
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
seo_title = response.choices[0].message.content
seo_title = seo_title.replace('"','')
messages.append({"role": "assistant", "content": seo_title})
prompt = f"Generate a meta description for this article in one very short sentence"
messages.append({"role": "user", "content": prompt})
response = Client.chat.completions.create(model=model,messages=messages,)
meta_description = response.choices[0].message.content
meta_description = meta_description.replace('"','')
messages.append({"role": "assistant", "content": meta_description})
return title, focus_keyphrase, meta_description, seo_title
#TODO scrape google maps for JSON data- may be different for style of application
#TODO def generate_ranking(methodology):
#prompt: generate ranking for {category} based off {methodology} in JSON format: {category: {publication: {rank: 1, link: https://www.wired.com/, photo: https://www.wired.com/logo.png}}}
#will give it the doc using assistants API.
#TODO add assistants API to take advantage of files with our internal links to add to the sections and use of threads so we don't max out tokens.
#TODO have a grading GPT that states if the article is good to post or not. If not, it will return a list of things to fix. And then call a GPT to fix the section.
#note Maybe the section builder shouldn't have access to the image url and we do that part manually.
#if img_url :
    #add the image
#else:
    #don't add the image
#TODO change from markdown to HTML for the final article.
"Generate the title for this article",
"ONLY generate the table of contents for this article in html with links to headings, include a heading for the section: PLACEHOLDER.",
"Generate a methodology section in html starting at h2 for a wordpress article titled PLACEHOLDER. Include a heading for the section.",
"Heres the article:\n PLACEHOLDER.",
"Generate an overview of this article with no images in html for the article titled PLACEHOLDER. Keep it one short sentence MAX for each section: PLACEHOLDER.",
"Generate a meta description for this article in one very short sentence",
"Generate a one paragraph introduction without including the methodology for a wordpress article format it html starting at h2: \n PLACEHOLDER",
"Generate a short one paragraph section in html about PLACEHOLDER for the article title PLACEHOLDER. Be sure to add their link whenever you mention their name: PLACEHOLDER and show the image if one: PLACEHOLDER. Dont add any headers.",
"Create a list of PLACEHOLDER and links to websites. Leave the image URL and Image URL blank like this JSON: {\"items\": [{\"Title\": \"TechCrunch\", \"URL\": \"\", \"Image URL\": \"\"},...]} ",
"Generate the SEO title for this article",
"Generate the Focus keyphrase for this article."
] |
2024-01-10 | IkeMane/Adogy-Publications | photos.py | from openai import OpenAI
import json
from dotenv import load_dotenv
import os
import requests
from pexels_api import API
import random
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
model = os.getenv("OPENAI_MODEL")
def search_and_download(search_term, filename='image.jpg'):
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
api = API(PEXELS_API_KEY)
api.search(search_term)
photos = api.get_entries()
if photos:
# Limit to the top 5 results
top_photos = photos[:5]
# Select a random image from the top 5 results
if top_photos:
selected_photo = random.choice(top_photos)
image_url = selected_photo.original
# Download the selected image
img_data = requests.get(image_url).content
with open(filename, 'wb') as handler:
handler.write(img_data)
return json.dumps({"search_term": search_term, "image_url": image_url, "saved_as": filename})
else:
return json.dumps({"search_term": search_term, "image_url": "None", "saved_as": "None"})
else:
return json.dumps({"search_term": search_term, "image_url": "None", "saved_as": "None"})
def run_images(keyword):
systemmsg = "You are a article image finder for wordpress articles."
messages = [{"role": "system", "content": systemmsg}]
messages.append({"role": "user", "content": f"Find an image for this article titled: {keyword} be sure not to serch for the title but for images that might repesent article e.g: News, or Journalist."})
tools = [
{
"type": "function",
"function": {
"name": "search_and_download",
"description": "Search and downloads a random image from the search term, only call this function once per message. - May have to input the same exact search term a few times to get the perfect image.",
"parameters": {
"type": "object",
"properties": {
"search_term": {
"type": "string",
"description": "The term to search for, e.g., 'news'",
},
},
"required": ["search_term"],
},
},
}
]
#loop through this
counter = 0
while True:
if counter > 5:
try:
#generate new image
messages = list()
systemmsg = "You are a prompt enegineer for AI generated images."
messages.append({"role": "system", "content": systemmsg})
messages.append({"role": "user", "content": f"Generate a prompt for Dall-e to generate an image for {keyword} article. You will have to describe exactly what you want to see to every detail. Dont use IP or trademarked content."})
dalle_prompt = client.chat.completions.create(
model=model,
messages = messages,
)
prompt = dalle_prompt.choices[0].message.content
print("\n\nDalle Prompt:",prompt)
# Generate the image
response = client.images.generate(
model="dall-e-3",
prompt=prompt,
size="1024x1024",
quality="hd",
n=1,
style="vivid",
)
image_url = response.data[0].url
print("\n\nDalle Image URL:",image_url)
# Download and save the image
img_data = requests.get(image_url).content
with open('image.jpg', 'wb') as handler:
handler.write(img_data)
print("Image saved as image.jpg")
break
except Exception as err:
# Handle the error here
print("Error:", err)
counter += 1
continue
response = client.chat.completions.create(
model=model,
messages=messages,
tools=tools,
tool_choice="auto",
)
response_message = response.choices[0].message
print("\n\nResponse 1:",response_message.content)
tool_calls = response_message.tool_calls
if tool_calls:
available_functions = {"search_and_download": search_and_download}
messages.append(response_message)
for tool_call in tool_calls:
function_name = tool_call.function.name
function_to_call = available_functions[function_name]
function_args = json.loads(tool_call.function.arguments)
function_response = function_to_call(
search_term=function_args.get("search_term"),
filename=function_args.get("filename", "image.jpg"),
)
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response,
}
)
second_response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=messages,
)
print("\n\nResponse 2:",second_response.choices[0].message.content)
messages.append(second_response.choices[0].message)
image_url = json.loads(function_response)["image_url"]
image_messages = [
{
"role": "user",
"content": [
{"type": "text", "text": f"is this image sutatble for the article titled {keyword}? If not then say no, explain what the image was in one sentence and say try again, you can use the same search term again or a new one if it still isnt working. Note: The image doesnt have to be perfect but it should resemble something in the article."},
{
"type": "image_url",
"image_url": {
"url": image_url,
},
},
],
}
]
third_response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=image_messages,
)
print("\n\nThird Response: ",third_response.choices[0].message.content)
messages.append({"role": "user", "content": third_response.choices[0].message.content})
if "no" in third_response.choices[0].message.content.lower():
#restart loop
print("\n\nRestarting loop")
# print(messages)
counter += 1
continue
else:
#stop loop
print("\n\nStopping loop because of yes in response")
# print(messages)
counter = 0
break
else:
#stop loop
# print(messages)
print("\n\nStopping loop because no tool calls")
counter = 0
break
if __name__ == "__main__":
keyword = "Top Tech Publications"
run_images(keyword)
#TODO Have it see images that it's already used and not use them again. Add a general screenshot of the website maybe?
"Generate a prompt for Dall-e to generate an image for PLACEHOLDER article. You will have to describe exactly what you want to see to every detail. Dont use IP or trademarked content.",
"Find an image for this article titled: PLACEHOLDER be sure not to serch for the title but for images that might repesent article e.g: News, or Journalist.",
"[{'type': 'text', 'text': 'is this image sutatble for the article titled PLACEHOLDER? If not then say no, explain what the image was in one sentence and say try again, you can use the same search term again or a new one if it still isnt working. Note: The image doesnt have to be perfect but it should resemble something in the article.'}, {'type': 'image_url', 'image_url': {'url': PLACEHOLDER}}]"
] |
2024-01-10 | IsisChameleon/niddy-bot | src~modules~loaders.py | import os
from langchain.document_loaders import CSVLoader
from langchain.document_loaders.pdf import PDFPlumberLoader
from langchain.schema import Document
from pathlib import Path
class MyDirectoryLoader:
def __init__(self, dir_path):
if type(dir_path) is str:
dir_path = Path(dir_path)
self.dir_path = dir_path
def loadOLd(self):
docs = []
for root, _, files in os.walk(self.dir_path):
for file in files:
print('file:', file)
file_path = os.path.join(root, file)
if file_path.endswith('.csv'):
loader = CSVLoader(file_path)
elif file_path.endswith('.pdf'):
loader = PDFPlumberLoader(file_path)
else:
print(f"Do not process the file: {file_path}")
continue
loaded_docs = loader.load()
docs.extend(loaded_docs)
return docs
def load(self):
docs = []
for obj in self.dir_path.rglob('*'):
if obj.is_file():
print('file:', obj.name)
file_path = obj
if file_path.suffix == '.csv':
loader = CSVLoader(str(file_path))
elif file_path.suffix == '.pdf':
loader = PDFPlumberLoader(str(file_path))
else:
print(f"Do not process the file: {file_path}")
continue
loaded_docs: List[Document] = loader.load()
docs.extend(loaded_docs)
return docs
| [] |
2024-01-10 | IsisChameleon/niddy-bot | src~tests~test_myDirectoryLoader.py | import unittest
from unittest.mock import patch, Mock
from pathlib import Path
from modules.loaders import MyDirectoryLoader
from langchain.schema import Document
class TestMyDirectoryLoader(unittest.TestCase):
@patch('modules.loaders.CSVLoader')
@patch('modules.loaders.PDFPlumberLoader')
def test_load(self, MockPDFPlumberLoader, MockCSVLoader):
# Arrange
dir_path = Path('some/directory')
csv_loader = Mock()
pdf_loader = Mock()
MockCSVLoader.return_value = csv_loader
MockPDFPlumberLoader.return_value = pdf_loader
csv_loader.load.return_value = [Document(page_content='csv_doc1'), Document(page_content='csv_doc2')]
pdf_loader.load.return_value = [Document(page_content='pdf_doc1'), Document(page_content='pdf_doc2')]
# Act
my_directory_loader = MyDirectoryLoader(dir_path)
with patch('modules.loaders.Path.rglob', return_value=[
Path('file1.csv'),
Path('file2.pdf'),
Path('file3.txt')
]), patch('modules.loaders.Path.is_file', return_value=True):
docs = my_directory_loader.load()
# Assert
self.assertEqual(docs, [Document(page_content='csv_doc1'), Document(page_content='csv_doc2'),Document(page_content='pdf_doc1'), Document(page_content='pdf_doc2')])
MockCSVLoader.assert_called_once_with('file1.csv')
MockPDFPlumberLoader.assert_called_once_with('file2.pdf')
csv_loader.load.assert_called_once()
pdf_loader.load.assert_called_once()
if __name__ == '__main__':
unittest.main()
| [] |
2024-01-10 | KavyaSethia/HTM4.O | text_gen.py | import openai
import re
from api_key import API_KEY
openai.api_key = API_KEY
model_engine = "text-davinci-003"
text= input("What do you want to know about?")
prompt = text
print("AI-Bot is generating new text for you....")
completions = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop = None,
temperature = 0.5
)
#print the generated text
generated_text = completions.choices[0].text
# save text in file
with open("generated_text.txt","w") as file:
file.write(generated_text.strip())
print("text generated !!") | [] |
2024-01-10 | KavyaSethia/HTM4.O | video_gen.py | import openai
import re, os
import urllib.request
from gtts import gTTS
from moviepy.editor import *
from api_key import API_KEY
from moviepy.config import change_settings
change_settings({"IMAGEMAGICK_BINARY": r'C:\Program Files\ImageMagick-7.1.1-Q16-HDRI\convert.exe'})
#
openai.api_key = API_KEY
with open("generated_text.txt", "r") as file:
text = file.read()
# split the text by , and .
paragraphs = re.split(r"[,.]", text)
# create folders
os.makedirs("audio", exist_ok=True)
os.makedirs("images", exist_ok=True)
os.makedirs("video", exist_ok=True)
# loop through each
i = 1
for para in paragraphs[:-1]:
response = openai.Image.create(
prompt=para.strip(),
n=1
#size="1024x1024"
)
print("generate new img from para")
image_url = response['data'][0]['url']
urllib.request.urlretrieve(image_url, f"images/image{i}.jpg")
print("generated image saved in img folder")
# create gtts instance
tts = gTTS(text=para, lang='en', slow=False)
tts.save(f"audio/voiceover{i}.mp3")
print("paragraph converted to voice")
print("extract voice get duration")
audio_clip = AudioFileClip(f"audio/voiceover{i}.mp3")
audio_duration = audio_clip.duration
# audio file using moviepy
print("extract image clip and set duration")
image_clip = ImageClip(f"images/image{i}.jpg").set_duration(audio_duration)
print("customize text clip")
text_clip = TextClip(para, fontsize=25, color="white")
text_clip = text_clip.set_pos('center').set_duration(audio_duration)
# use py to create final video
print("concatenated video")
clip = image_clip.set_audio(audio_clip)
video = CompositeVideoClip([clip, text_clip])
# save final video to file
    video.write_videofile(f"video/video{i}.mp4", fps=24)
print(f"The Video{i} Has Been Created Successfully!")
i += 1
clips = []
l_files = os.listdir("video")
for file in l_files:
clip = VideoFileClip(f"video/{file}")
clips.append(clip)
print("Concatenate All The Clips to Create a Final Video...")
final_video = concatenate_videoclips(clips, method="compose")
final_video.write_videofile("final_video.mp4")
print("The Final Video Has Been Created Successfully!") | [] |
2024-01-10 | saurabh175/TeamKart | flask~llama_index~structure.py | from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.output_parsers import GuardrailsOutputParser
from llama_index.llm_predictor import StructuredLLMPredictor
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT_TMPL, DEFAULT_REFINE_PROMPT_TMPL
import openai
import os
openai.api_key = "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"
os.environ['OPENAI_API_KEY'] = "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"
# load documents, build index
documents = SimpleDirectoryReader('company_data').load_data()
index = VectorStoreIndex.from_documents(documents, chunk_size=512, openai_api_key = "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")
llm_predictor = StructuredLLMPredictor()
# specify StructuredLLMPredictor
# this is a special LLMPredictor that allows for structured outputs
# define query / output spec
rail_spec = ("""
<rail version="0.1">
<output>
<list name="products" description="Bullet points regarding products that the company sells">
<object>
<list name="product" description="Bullet points regarding the individual product">
<object>
<string name="price" description="The price of the product"/>
<string name="description" description="The description of the product"/>
</object>
</list>
</object>
</list>
</output>
<prompt>
Query string here.
@xml_prefix_prompt
{output_schema}
@json_suffix_prompt_v2_wo_none
</prompt>
</rail>
""")
# define output parser
output_parser = GuardrailsOutputParser.from_rail_string(
rail_spec, llm=llm_predictor.llm)
# format each prompt with output parser instructions
fmt_qa_tmpl = output_parser.format(DEFAULT_TEXT_QA_PROMPT_TMPL)
fmt_refine_tmpl = output_parser.format(DEFAULT_REFINE_PROMPT_TMPL)
qa_prompt = QuestionAnswerPrompt(fmt_qa_tmpl, output_parser=output_parser)
refine_prompt = RefinePrompt(fmt_refine_tmpl, output_parser=output_parser)
# obtain a structured response
query_engine = index.as_query_engine(
service_context=ServiceContext.from_defaults(
llm_predictor=llm_predictor
),
    text_qa_template=qa_prompt,
refine_template=refine_prompt,
)
instructions = """
Format your response like:
1. <product name> <price> <description>
2.
"""
response = query_engine.query(
"What are all the products from this store",
)
print(response)
| [] |
2024-01-10 | saurabh175/TeamKart | flask~open_ai~user~iterate_user_profile.py | import openai
openai.api_key = "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"
def strengthen_profile(cur_profile, recent_chat, products_bought):
profile_current = "Here is the current profile (it may be empty): " + cur_profile + "\n"
prompt = f"""
We are an e-commerce platform that sells products to customers. We want to strengthen our customer profiles by adding information about the products that they have bought and conversations they have had.
For example, if a customer has bought a product, we want to add information about that product to their profile. If a customer has had a conversation with a customer service representative, we want to add information about that conversation to their profile.
By adding information, we don't want to just copy the direct product names into their profile - rather, we want to derive insights about the persona and background of the user.
For example, if the user is buying hiking clothes and talking about playing sports, we can assume that this user is an active individual.
If the user is buying a lot of books and talking about reading, we can assume that this user is an avid reader.
    If the user talks about keeping warm, the user may live in a cold area, so note that they like to be warm and might live in a cool environment.
{profile_current}
Here is their most recent chat - this may be structured like a transcripts.
{recent_chat}
Here are the products that the user has bought:
{products_bought}
Format your answer in the following way:
User is a ______
New user profile:
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt= prompt,
temperature=0.3,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response['choices'][0]
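if __name__ == "__main__":
    # Minimal usage sketch of strengthen_profile; the profile, chat transcript,
    # and purchase list below are made-up sample inputs, not data from this project.
    sample_profile = "User is an avid reader."
    sample_chat = "Customer: Do you have waterproof hiking boots for a trip to Norway?"
    sample_purchases = "Trail running shoes, wool base layer"
    print(strengthen_profile(sample_profile, sample_chat, sample_purchases))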
| [
"\n We are an e-commerce platform that sells products to customers. We want to strengthen our customer profiles by adding information about the products that they have bought and conversations they have had.\n For example, if a customer has bought a product, we want to add information about that product to their profile. If a customer has had a conversation with a customer service representative, we want to add information about that conversation to their profile.\n By adding information, we don't want to just copy the direct product names into their profile - rather, we want to derive insights about the persona and background of the user.\n For example, if the user is buying hiking clothes and talking about playing sports, we can assume that this user is an active individual.\n If the user is buying a lot of books and talking about reading, we can assume that this user is an avid reader.\n If the user talks about keeping warm, the user may live in a cold area, so save that he likes to be warn and might live in a cool environment.\n \n Here is the current profile (it may be empty): PLACEHOLDER\n\n\n Here is their most recent chat - this may be structured like a transcripts.\n PLACEHOLDER\n\n Here are the products that the user has bought:\n PLACEHOLDER\n\n Format your answer in the following way:\n User is a ______\n\n New user profile: \n "
] |
2024-01-10 | saurabh175/TeamKart | live-demo~application.py | from flask import Flask, request, render_template, jsonify
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
import os
from langchain.prompts.prompt import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders.json_loader import JSONLoader
import json
import requests
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from api import findKNearestItems
from item import ProductEncoder, create_product_object
app = Flask(__name__)
rapidapi_key = "d83f4bfe89msh2c35c0f026b5666p1bfa94jsnb9eba71c9060" # Replace with your RapidAPI key
rapidapi_endpoint = "https://gpt-chat.p.rapidapi.com/v1/gpt-3.5-turbo"
openai_api_key = "sk-d83f4bfe89msh2c35c0f026b5666p1bfa94jsnb9eba71c9060"
# A langchain LLM is required by the ConversationChain defined below.
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
# with open("faq.txt", "r", encoding="utf-8") as file:
# content = file.read()
# with open("faq.txt", "r", encoding="utf-8", errors="ignore") as file:
# content = file.read()
instructions = '''
You are a online shopping assistant with two tasks - answering questions about inventory and company policy or generating product bundles.
Specifically, you are an online shopping assistant meant to guide users through the website of the company 'Son of a Tailor'.
It is your job to deduce whether the user is asking a question about the company's policy or is asking about potential products to buy.
For each case, follow the follow instructions respectively.
START OF INSTRUCTIONS
If the user is asking a general FAQ question about policies, DO NOT RECOMMEND ANY PRODUCTS out of the inventory. After directly responding to the 
question asked by the user about policies, terminate your answer.
1. If the user is asking questions about the company's policies or general information
Answer the question using the company's FAQs data.
Your response should follow the following format (under no circumstances should you recommend a product in this case):
FAQ Response: <response>
If the question is about products, follow the below protocol:
2. The user is looking for products to buy.
If you do not know the exact name of a product or it does not exist within the company's inventory, tell them that we do not offer it at the moment. Do not make up or reference products that are not directly from the data provided.
Only provide the DKK and USD prices unless specified for a different currency.
Don't just build outfits based on your general knowledge - only base your outfits on the product list you were given. Don't make up the names of products that you don't have access to. We only sell tops. 
If a product has more than one color available, suggest a color but also say we have more colors.
Verify that the product bundles you are generating are adequate and are of the same type as the request being made and fit the appropriate criteria
If the user says some gibberish or something that is not a question or doesn't make sense, say that they have to clarify and you don't understand by saying I'm sorry, I don't understand. Could you please clarify what you are asking?
Keep your responses under 200 words. At the end of your response, list out each product that you chose, why you chose that product, and confirm that the product was found in the list of products we provided.
If the user provides you details about why they need something (region, reason, age), cater your results to this preference.
Your response should be in the following format:
- <product >
- <product >
- <product >
...
<reasoning >
Only provide the title of the product and price, no other information. Do not provide materials unless asked for. 
Keep in mind the context of what the user has said in the conversation below when giving answers to additional questions.
If the user is not asking or requesting products to buy, just answer their question without recommending any clothing items. Parse through the FAQs to get relevant information.
END OF INSTRUCTIONS
Current Conversation:
{history}
Here are the products related to the current query: {input}
The <reasoning> should come after the product listing and should be brief. Keep the word count low and concise.
AI Assistant:'''
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=instructions
)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory=ConversationBufferMemory(ai_prefix="AI Assistant")
)
# chrome://net-internals/#sockets
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('popup.html')
# @app.route('/get_parameter', methods=['GET'])
# def get_parameter():
# input_data = request.args.get('input')
# nearest_items = findKNearestItems(input_data, 5)
# products = process_products(nearest_items)
# input_products = product_template(products)
# llm_response = conversation.predict(
# input=input_products + "\n Customer: " + input_data)
# print(llm_response)
# res = {"data": products, "result": llm_response}
# return jsonify(res)
@app.route('/get_parameter', methods=['GET'])
def get_parameter():
input_data = request.args.get('input')
# Send request to RapidAPI
headers = {
"X-RapidAPI-Host": "gpt-chat.p.rapidapi.com",
"X-RapidAPI-Key": rapidapi_key,
"Content-Type": "application/json",
}
payload = {
"messages": [{"role": "user", "content": input_data}],
"model": "gpt-3.5-turbo",
"max_tokens": 100,
"temperature": 0.9,
}
    response = requests.post(rapidapi_endpoint, headers=headers, json=payload)
response_data = response.json()
llm_response = response_data["choices"][0]["message"]["content"]
# Process llm_response and generate products
nearest_items = findKNearestItems(input_data, 5)
products = process_products(nearest_items)
input_products = product_template(products)
# Generate AI response using conversation.predict
conversation_history = "" # Add your conversation history here
llm_response = conversation.predict(
input=input_products + "\n Customer: " + input_data + "\n" + conversation_history
)
print(llm_response)
res = {"data": products, "result": llm_response}
return jsonify(res)
# turns each string into a product object and converts to json
def process_products(inputs):
products = []
for item in inputs:
product = create_product_object(item[0])
product_json = json.dumps(product, cls=ProductEncoder)
product_dict = json.loads(product_json)
products.append(product_dict)
return products
def product_template(products):
res = []
for p in products:
res.append(
{
'name': p['title'],
'desc': p['desc']
}
)
return str(res)
if __name__ == "__main__":
# Change the host and port as desired
app.run(host='localhost', port=9000)
| [
"\n\nYou are a online shopping assistant with two tasks - answering questions about inventory and company policy or generating product bundles. \nSpecifically, you are an online shopping assistant meant to guide users through the website of the company 'Son of a Tailor'.\n\nIt is your job to deduce whether the user is asking a question about the company's policy or is asking about potential products to buy.\nFor each case, follow the follow instructions respectively.\n\nSTART OF INSTRUCTIONS\n\nIf user is asking a general FAQ question about policies, DO NOT RECCOMEND ANY PRODUCTS out of the inventory. After directly responding to the \nquestion asked by the user about policies, terminate your answer. \n\n1. If the user is asking questions about the company's policies or general information\nAnswer the question using the company's FAQs data. \nYour response should follow the following format (under no circumstances should you recommend a product in this case):\nFAQ Response: <response>\n\nIf the question is about products, follow the below protocol:\n \n2. The user is looking for products to buy.\nIf you do not know the exact name of a product or it does not exist within the company's inventory, tell them that we do not offer it at the moment. Do not make up or reference products that are not directly from the data provided. \nOnly provide the DKK and USD prices unless specified for a different currency.\nDon't just build outfits based on your general knowledge - only base your oufits on the product list you were given. Don't make up the names of products that you don't have access to. We only sell tops. \nIf a product has more than one color available, suggest a color but also say we have more colors.\nVerify that the product bundles you are generating are adequate and are of the same type as the request being made and fit the appropriate criteria\nIf the user says some gibberish or something that is not a question or doesn't make sense, say that they have to clarify and you don't understand by saying I'm sorry, I don't understand. Could you please clarify what you are asking?\nKeep your responses under 200 word. At the end of your response, list out each product that you chose from, why you chose that product, and confirm that the product was found in the list of products we inputted.\nIf the user provides you details about why they need something (region, reason, age), cater your results to this preference.\nYour response should be in the following format:\n - <product >\n - <product >\n - <product >\n ...\n<reasoning >\n\nOnly provde the title of the product and price, no other information. Do not provide materials unless asked for. \nKeep in mind the context of what the user has said in the conversation below when giving answers to additional questions.\nIf the user is not asking or requesting products to buy, just answer their question without recommending any clothing items. Parse through the FAQs to get relevant information.\n\nEND OF INSTRUCTIONS\n\nCurrent Conversation:\n{history}\n\nHere are the products related to the current query: {input}\n\nThe <reasoning> should come after the product listing and should be brief. Keep the word count low and concise. \n\nAI Assistant:",
"t just build outfits based on your general knowledge - only base your oufits on the product list you were given. Don",
"m sorry, I don",
"Son of a Tailor",
"input",
"t make sense, say that they have to clarify and you don"
] |
2024-01-10 | saurabh175/TeamKart | flask~upsert.py | from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import DirectoryLoader
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import OpenAIEmbeddings
from flask import Flask, request, send_file
from flask_cors import CORS
from werkzeug.utils import secure_filename
import os
from llama_index import SimpleDirectoryReader
import tiktoken
from langchain.text_splitter import RecursiveCharacterTextSplitter
import hashlib
import openai
import pinecone
import mimetypes
from langchain.llms import OpenAI
from open_ai.shopping.query import getResponse
from utils.process import getCSV, getJSON, getWebsite
from langchain.document_loaders import PyPDFLoader, OnlinePDFLoader, UnstructuredPDFLoader, CSVLoader
openai.api_key = "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"
# Initialize OpenAI embeddings model
embeddings = OpenAIEmbeddings(
openai_api_key="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")
PINECONE_API_KEY = '2f1f9a16-8e97-4485-b643-bbcd3618570a'
PINECONE_ENVIRONMENT = 'us-west1-gcp-free'
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
index = pinecone.Index('wing-sandbox')
index.delete(delete_all=True)
openai_api_key = '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'
tokenizer = tiktoken.get_encoding('cl100k_base')
BEARER_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiQXNod2luIENoaXJ1bWFtaWxsYSJ9.keW___VBKcQY6uyxkxOH_uXZ1Jo74171cVa8SozxrKc"
datas = []
docsearchers = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000, chunk_overlap=0)
UPSERT_BATCH_SIZE = 100
app = Flask(__name__)
CORS(app, origins=["http://localhost:3000/"])
# unblock CORS
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin',
'http://localhost:3000/')
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods',
'GET,PUT,POST,DELETE,OPTIONS,PATCH')
return response
def tiktoken_len(text):
tokens = tokenizer.encode(text, disallowed_special=())
return len(tokens)
def process_csv(text):
chunks = text_splitter.split_text(text)
documents = []
for i, chunk in enumerate(chunks):
documents.append({
'id': str(hash(chunk)),
'text': chunk
})
return documents
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=20,
length_function=tiktoken_len,
separators=['\n\n', '\n', ' ', '']
)
@app.route('/upload', methods=['POST'])
def upload_file():
file = request.files['file']
filename = secure_filename(file.filename)
file_path = os.path.join('data', filename)
file.save(file_path)
file_type, _ = mimetypes.guess_type(file_path)
extension = file_type.split('/')[-1]
if extension == 'pdf':
loader = PyPDFLoader(file_path)
data = loader.load()
datas.append(data)
elif extension == 'csv':
loader = CSVLoader(file_path)
data = loader.load()
datas.append(data)
texts = text_splitter.split_documents(data)
docsearch = Pinecone.from_texts([t.page_content for t in texts], embeddings, index_name="wing-sandbox")
docsearchers.append(docsearch)
return "file successfully uploaded"
@app.route('/query', methods=['POST'])
def query_chat():
data = request.json
query = data.get('query_string')
user_profile = "My name is Arth Bohra and I live in the extreme cold."
return getResponse(query, user_profile, docsearchers[0])
def load_document(filename):
loader = PyPDFLoader(filename)
docs = loader.load()
return docs
def process_documents(docs):
documents = []
for doc in docs:
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
documents.append({
'id': str(hash(chunk)),
'text': chunk
})
return documents
def get_embeddings(texts):
response = openai.Embedding.create(
input=texts, model="text-embedding-ada-002")
data = response["data"]
return [result["embedding"] for result in data]
def remove_files_from_data():
for file in os.listdir('data'):
os.remove(os.path.join('data', file))
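if __name__ == "__main__":
    # Minimal local entry point sketch; Flask's default port is assumed here,
    # since the original module does not specify how the app is served.
    app.run(debug=True)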
| [] |
2024-01-10 | saurabh175/TeamKart | flask~trial.py | from langchain.document_loaders import UnstructuredPDFLoader, OnlinePDFLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
import openai
# Load and split a source document so the rest of the script has `data` and `texts`
# to work with; the PDF path below is an illustrative placeholder, not a file
# shipped with this repository.
loader = PyPDFLoader("data/sample.pdf")
data = loader.load()
print(f'You have {len(data)} document(s) in your data')
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(data)
print(f'Now you have {len(texts)} documents')
from langchain.vectorstores import Chroma, Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
embeddings = OpenAIEmbeddings(openai_api_key="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")
openai.api_key = "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"
PINECONE_API_KEY = '2f1f9a16-8e97-4485-b643-bbcd3618570a'
PINECONE_ENVIRONMENT = 'us-west1-gcp-free'
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
index = pinecone.Index('wing-sandbox')
index.delete(delete_all=True)
docsearch = Pinecone.from_texts([t.page_content for t in texts], embeddings, index_name="wing-sandbox")
query = "What are examples of good data science teams?"
docs = docsearch.similarity_search(query)
print(docs[0].page_content[:450])
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key="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")
chain = load_qa_chain(llm, chain_type="stuff")
query = "What is BYOC?"
docs = docsearch.similarity_search(query)
print(chain.run(input_documents=docs, question=query))
| [] |
2024-01-10 | saurabh175/TeamKart | flask~utils~process.py | import json
import openai
from openai.embeddings_utils import cosine_similarity
import matplotlib
from langchain.embeddings import OpenAIEmbeddings
from llama_index import download_loader
import urllib.request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from urllib.request import urlopen
from bs4 import BeautifulSoup
from langchain.document_loaders import PyPDFLoader
import csv
embeddings = OpenAIEmbeddings(
openai_api_key="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")
openai.api_key = "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"
def getCSV(filename):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
csv_string = ""
column_headers = []
for row in csv_reader:
if line_count == 0:
for col in row:
column_headers.append(col)
csv_string += f'"{col}": "{col}",'
line_count += 1
else:
csv_string += "{"
i = 0
for col in row:
csv_string += f'"{column_headers[i]}": "{col}",'
i += 1
csv_string += "},"
line_count += 1
return csv_string
def getJSON(filename):
# turn the json file into a json string
json_string = ""
json_file = open(filename)
json_data = json.load(json_file)
for item in json_data:
json_string += json.dumps(item)
return json_string
def getWebsite(url):
html = urlopen(url).read()
soup = BeautifulSoup(html, features="html.parser")
# kill all script and style elements
for script in soup(["script", "style"]):
script.extract() # rip it out
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
return text
def getPDF(filename):
loader = PyPDFLoader(filename)
pages = loader.load_and_split()
return pages
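if __name__ == "__main__":
    # Minimal usage sketch of the helpers above; the CSV filename and URL are
    # illustrative placeholders, not assets shipped with this repository.
    print(getCSV("products.csv")[:200])
    print(getWebsite("https://example.com")[:500])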
| [] |
2024-01-10 | SR3u/gpt-2-tensorflow2.0 | sample.py | import json
import sentencepiece as spm
import tensorflow as tf
from gpt2_model import Gpt2
def argmax(logits):
return tf.argmax(logits)
def top_k_logits(logits, k):
if k == 0:
return logits
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits
)
# Nucleas Sampling (https://arxiv.org/pdf/1904.09751.pdf)
def top_p_logits(logits, p):
"""Took from OpenAI GPT-2 Implememtation"""
batch = tf.shape(logits)[0]
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
class SequenceGenerator:
def __init__(self, model_path, model_param, vocab_path):
self.sp = None
self.model = None
self.model_path = model_path
self.model_param = model_param
self.vocab_path = vocab_path
def load_weights(self):
with open(self.model_param) as f:
param = json.load(f)
self.model = Gpt2(param['num_layers'],
param['d_model'],
param['num_heads'],
param['dff'],
param['max_seq_len'],
param['vocab_size'])
ckpt = tf.train.Checkpoint(model=self.model)
ckpt_manager = tf.train.CheckpointManager(ckpt, self.model_path, max_to_keep=1)
ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()
print('Model weights loaded into memory')
self.sp = spm.SentencePieceProcessor()
self.sp.load(self.vocab_path)
def sample_sequence(self,
context=None,
seq_len=512,
bos=3,
eos=4,
temperature=1,
top_k=8,
top_p=8,
nucleus_sampling=True):
if context == None:
print("Give some context to model.................")
return
context = tf.expand_dims(([bos] + self.sp.encode_as_ids(context)), 0)
prev = context
output = context
past = None
for i in range(seq_len):
logits, past = self.model(prev, training=False, past=past)
# print(logits)
logits = logits[:, -1, :] / tf.cast(temperature, tf.float32)
# print(logits)
logits = top_k_logits(logits, k=top_k)
# print(logits)
if nucleus_sampling:
logits = top_p_logits(logits, p=top_p)
samples = tf.random.categorical(logits, num_samples=1, dtype=tf.int32)
# print(samples)
if tf.equal(samples, eos):
# print("Predicted end of sequence.")
break
# print("shape.........")
# print(tf.shape(output))
# print(tf.shape(samples))
output = tf.concat([output, samples], axis=-1)
prev = samples
# print(tf.shape(output))
# print(output)
# print("--------------------------")
result = tf.squeeze(output, axis=0)
pred = [int(i) for i in result]
generated_seq = self.sp.decode_ids(pred[1:])
generated_seq = generated_seq.replace("[SEP]", "").strip()
generated_seq = ' '.join(generated_seq.split())
return generated_seq
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~splitters~character.py | from langchain.text_splitter import CharacterTextSplitter
class Character_TextSplitter:
"""
A class to split a Document using the CharacterTextSplitter wrapper from the langchain library
"""
def __init__(self, separator, chunk_size, chunk_overlap, length_function):
"""
Initializes a new instance of CharacterTextSplitter
ex.
splitter = character.Character_TextSplitter(
separator= "\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function = len,
)
:param separator: list of separator characters for the text splitter
:param chunk_size: Maximum size of chunks to return
:param chunk_overlap: Overlap in characters between chunks
:param length_function: Function that measures the length of given chunks
"""
self.splitter = CharacterTextSplitter(
separator = separator,
chunk_size = chunk_size,
chunk_overlap = chunk_overlap,
length_function = length_function
)
def split_data(self, data):
"""
Splits the given Document based on single characters, default "\n\n", and measures chunk length by number of characters
:param data: The Document to be split, in the Document format returned by the langchain pdf loaders
:return: Split Documents
"""
docs = self.splitter.split_documents(data)
return docs
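if __name__ == "__main__":
    # Minimal usage sketch with a made-up in-memory Document, mirroring the
    # parameters shown in the constructor docstring above.
    from langchain.docstore.document import Document
    splitter = Character_TextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    sample_docs = [Document(page_content="First paragraph.\nSecond paragraph.")]
    print(splitter.split_data(sample_docs))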
| [] |
2024-01-10 | arvindinh/custom-chatbot | ai~llms~llms_mapper.py | import os
from langchain.llms import OpenAI
class LLMs_Mapper:
"""
A class to initalize a new language model based on the wrappers from langchain.
"""
def __init__(self):
"""
initializes a new mapper to return a LLM object based on the langchain wrapper
"""
self.openai_key = os.environ.get('OPENAI_API_KEY')
self.model_map = {
"openai" : (OpenAI, {"temperature": 0.7, "openai_api_key": self.openai_key}),
#temperature:takes values 0-2, lower = more focused and deterministic, higher = random and diverse.
}
def find_model(self, model):
if model in self.model_map:
model_class, model_args = self.model_map[model]
model = model_class(**model_args)
return model
raise ValueError(f"LLM '{model}' not recognized")
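if __name__ == "__main__":
    # Minimal usage sketch; assumes OPENAI_API_KEY is set in the environment,
    # as the mapper above expects.
    mapper = LLMs_Mapper()
    llm = mapper.find_model("openai")
    print(llm("Reply with a single friendly greeting."))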
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~tests~test_loaders.py | import sys
sys.path.append('..')
from langchain.document_loaders import (
CSVLoader,
PyMuPDFLoader,
TextLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredWordDocumentLoader,
UnstructuredPowerPointLoader,
UnstructuredExcelLoader,
)
from utils.loaders.loader_mapper import LoaderMapper
import pytest
import re
@pytest.mark.parametrize("doc, expected", [
('tests/docs/dummy_doc_twinkle.pdf', PyMuPDFLoader('tests/docs/dummy_doc_twinkle.pdf')),
('tests/docs/example.csv', CSVLoader('tests/docs/example.csv')),
('tests/docs/dummy.txt', TextLoader(file_path='tests/docs/dummy.txt', encoding="utf8")),
('tests/docs/dummy.html', UnstructuredHTMLLoader('tests/docs/dummy.html')),
('tests/docs/dummy.md', UnstructuredMarkdownLoader('tests/docs/dummy.md')),
('tests/docs/dummy.docx', UnstructuredWordDocumentLoader('tests/docs/dummy.docx')),
('tests/docs/dummy.pptx', UnstructuredPowerPointLoader('tests/docs/dummy.pptx')),
('tests/docs/dummy.xlsx', UnstructuredExcelLoader('tests/docs/dummy.xlsx')),
])
def test_return_loader(doc, expected):
mapper = LoaderMapper()
loader = mapper.find_loader(doc)
assert type(loader) == type(expected)
@pytest.mark.parametrize("doc, expected", [
('tests/docs/dummy_doc_twinkle.pdf',
"""Twinkle, twinkle, little star,\nHow I wonder what you are!\nUp above the world so high,\nLike a diamond in the sky.\nTwinkle, twinkle, little star,\nHow I wonder what you are!"""),
('tests/docs/example.csv',
"""Name: John
Age: 25
Country: USA"""),
('tests/docs/dummy.txt',
"""Blah blah blah. Sample text. Blah Blah
Blah Blah Blah. This is so fun. Blah Blah.
Abcdefghijklmnopqrstuvwxyz.""" ),
('tests/docs/dummy.html',
"""This is a dummy HTML file.
It serves as an example."""),
('tests/docs/dummy.md',
"""Dummy Markdown File
This is a dummy Markdown file.
It serves as an example. Item 1 Item 2 Item 3"""),
('tests/docs/dummy.docx',
"""Dummy Document
This is a dummy Word document."""),
('tests/docs/dummy.pptx',
"""Dummy Presentation
This is a dummy PowerPoint presentation."""),
('tests/docs/dummy.xlsx',
"""This is a dummy Excel spreadsheet."""),
])
def test_load_doc(doc, expected):
mapper = LoaderMapper()
loader = mapper.find_loader(doc)
loaded_doc = loader.load()
text = loaded_doc[0].page_content
actual_normalized = re.sub(r'\s+', ' ', text.strip())
expected_normalized = re.sub(r'\s+', ' ', expected.strip())
assert actual_normalized == expected_normalized
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~loaders~loader_mapper.py | from langchain.document_loaders import (
CSVLoader,
PyMuPDFLoader,
TextLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredWordDocumentLoader,
UnstructuredPowerPointLoader,
UnstructuredExcelLoader,
)
class LoaderMapper:
"""
LoaderMapper can accept multiple file types and return a langchain loader wrapper that corresponds to the associated loader.
Currently supports csv, pdf, txt, html, md, docx, pptx, xls, xlsx.
Note: Currently having issues with JSON
"""
#keep dict of file extensions and their relevant loaders with their arguments
loader_map = {
".csv": (CSVLoader, {}),
".pdf": (PyMuPDFLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".xls": (UnstructuredExcelLoader, {}),
".xlsx": (UnstructuredExcelLoader, {}),
}
@classmethod
    def find_loader(cls, filepath):
"""
Finds the associated loader based on filepath extension
:param filepath: path of the file to be loaded
:return: langchain loader wrapper object. to load the filepath into a Document object, use ".load"
Example usage:
mapper = LoaderMapper()
loader = mapper.find_loader(filepath)
data = loader.load()
You can pass in the data(Document object) to a splitter, which returns the chunks you can pass to create an embedding/store in db
"""
ext = "." + filepath.rsplit(".", 1)[-1]
if ext in LoaderMapper.loader_map:
loader_class, loader_args = LoaderMapper.loader_map[ext]
loader = loader_class(filepath, **loader_args)
return loader
raise ValueError(f"Unsupported file extension '{ext}'")
| [] |
2024-01-10 | arvindinh/custom-chatbot | ai~tests~test_embeddings.py | import sys
sys.path.append('..')
import pytest
from ai.embeddings.embeddings_mapper import Embeddings_Mapper
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
@pytest.mark.parametrize("model, expected", [
("openai", OpenAIEmbeddings()),
("huggingface", HuggingFaceEmbeddings()),
])
def test_mapper(model, expected):
mapper = Embeddings_Mapper()
embeddings = mapper.find_model(model)
assert type(embeddings) == type(expected)
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~vectorstores~deep_lake.py | from langchain.vectorstores import DeepLake
class DeeplakeDB:
"""
A class to initialize the Deep Lake vector store and perform various operations based on the DeepLake wrapper from langchain
"""
def __init__(self, store_path, embedding_model):
"""
Initializes the DeepLake object based on a given dataset path and embedding function/model.
DeepLake wrapper is capable of internally computing the embedding using the given model and storing it in the path.
:param store_path: path that contains vector store. will create at that path if doesn't already exist
:param embedding_model: langchain embedding model
"""
self.db = DeepLake(dataset_path = store_path, embedding_function = embedding_model)
def add_docs(self, documents):
"""
        Adds the embedded documents to the path given on initialization and returns their ids.
        :param documents: list of langchain Document objects used for computing embeddings, then stored
        """
        ids = self.db.add_documents(documents)
        return ids
def find_similar(self, query):
"""
Returns the document that best matches the query
:param query: String that is tested for similarity search
:return: most similar Document object
"""
return self.db.similarity_search(query)
def delete_all(self):
"""
Deletes the vector store in the given path.
"""
self.db.delete_dataset()
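if __name__ == "__main__":
    # Minimal usage sketch; the store path is an illustrative placeholder and
    # OpenAIEmbeddings assumes OPENAI_API_KEY is set in the environment.
    from langchain.embeddings import OpenAIEmbeddings
    store = DeeplakeDB("./deeplake_store", OpenAIEmbeddings())
    print(store.find_similar("What is the refund policy?"))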
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~splitters~nltk.py | from langchain.text_splitter import NLTKTextSplitter
class NLTK_TextSplitter:
"""
A class to split a Document using the NLTKTextSplitter wrapper from the langchain library
"""
def __init__(self, chunk_size):
"""
Initializes a new instance of NLTKTextSplitter
:param chunk_size: Maximum size of chunks to return
"""
self.splitter = NLTKTextSplitter(chunk_size = chunk_size)
def split_data(self, data):
"""
Splits the given Document based on NLTK tokenzer, chunk size is measured by number of characters
:param data: The Document to be split, in the Document format returned by the langchain pdf loaders
:return: Split Documents
"""
docs = self.splitter.split_documents(data)
return docs
| [] |
2024-01-10 | arvindinh/custom-chatbot | utils~splitters~tiktoken.py | from langchain.text_splitter import TokenTextSplitter
class Token_TextSplitter:
"""
A class to split a Document using the TokenTextSplitter wrapper from the langchain library.
"""
def __init__(self, chunk_size, chunk_overlap):
"""
Initializes a new instance of TokenTextSplitter
:param chunk_size: Maximum size of chunks to return
:param chunk_overlap: Overlap in characters between chunks
"""
self.splitter = TokenTextSplitter(
chunk_size = chunk_size,
chunk_overlap = chunk_overlap,
)
def split_data(self, data):
"""
Splits the given Document based on tiktoken tokens. The text is split and chunk size is measured by tiktoken tokens.
:param data: The Document to be split, in the Document format returned by the langchain pdf loaders
:return: Split Documents
"""
docs = self.splitter.split_documents(data)
return docs
| [] |
2024-01-10 | arvindinh/custom-chatbot | ai~chains~conversational.py | from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
class ConversationModel:
"""
ConversationModel is a simple wrapper for a conversational language model that uses chat history in addition to context from db
"""
def __init__(self, llm, db):
"""
Initializes a conversational retrieval chain based on a given llm model, vector store.
:param llm: langchain language model object
:param db: langchain vector store object
"""
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
self.chat = ConversationalRetrievalChain.from_llm(llm=llm, retriever=db.as_retriever(), memory=memory)
def get_response(self, query):
"""
returns the response given by the given language model based on a given query
:param query: string, question to be passed in to the llm
:return: string, response given by llm based on query and embedded documents in vector store
"""
response = self.chat({"question": query})
return response["answer"]
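if __name__ == "__main__":
    # Minimal usage sketch wiring the chain to an OpenAI llm and a small in-memory
    # Chroma store; the sample text is made up and OPENAI_API_KEY is assumed to be set.
    from langchain.llms import OpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma
    sample_db = Chroma.from_texts(["Support is available 9am-5pm on weekdays."], OpenAIEmbeddings())
    chat = ConversationModel(OpenAI(temperature=0), sample_db)
    print(chat.get_response("When is support available?"))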
| [
"{'question': PLACEHOLDER}"
] |
2024-01-10 | arvindinh/custom-chatbot | utils~splitters~recursive.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
class RecursiveCharacter_TextSplitter:
"""
A class to split a Document using the RecursiveCharacterTextSplitter wrapper from the langchain library.
Recommended text splitter for generic text.
"""
def __init__(self, chunk_size, chunk_overlap, length_function):
"""
Initializes a new instance of RecursiveCharacterTextSplitter
:param chunk_size: Maximum size of chunks to return
:param chunk_overlap: Overlap in characters between chunks
:param length_function: Function that measures the length of given chunks
"""
self.splitter = RecursiveCharacterTextSplitter(
chunk_size = chunk_size,
chunk_overlap = chunk_overlap,
length_function = length_function
)
def split_data(self, data):
"""
Splits the given Document based on list of characters, ["\n\n", "\n", " ", ""]. Chunk size is measured of characters.
:param data: The Document to be split, in the Document format returned by the langchain pdf loaders
:return: Split Documents
"""
docs = self.splitter.split_documents(data)
return docs
| [] |
2024-01-10 | arvindinh/custom-chatbot | ai~tests~test_llms.py | import sys
sys.path.append('..')
import pytest
from ai.llms.llms_mapper import LLMs_Mapper
from langchain.llms import OpenAI
@pytest.mark.parametrize("model, expected", [
("openai", OpenAI()),
])
def test_mapper(model, expected):
mapper = LLMs_Mapper()
llm = mapper.find_model(model)
assert type(llm) == type(expected)
| [] |
2024-01-10 | Itsfoss0/langchain-rag-python | hello_rag.py | import os
import bs4
from langchain import hub
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import StrOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_core.runnables import RunnablePassthrough
import dotenv
dotenv.load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
loader = WebBaseLoader(
web_paths=(
"https://devlog.tublian.com/tublian-open-source-internship-cohort2-a-path-to-software-development-mastery",
),
)
loader.requests_kwargs = {"verify": False}
docs = loader.load()
print(docs)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
vectorstore = Chroma.from_documents(
documents=splits, embedding=OpenAIEmbeddings(), persist_directory="./chroma_db"
)
retriever = vectorstore.as_retriever()
print(retriever)
prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
rag_chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
print("invoking...")
result = rag_chain.invoke("How long is the Open Source internship?")
print(result)
print("invoking...1")
| [
"rlm/rag-prompt"
] |
2024-01-10 | DylanAlloy/prismatica | celery_server.py | from fastapi import FastAPI, BackgroundTasks, File, UploadFile, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from pymongo import MongoClient
from bson import ObjectId
from pydantic import BaseModel
import hashlib
import os
from transformers import pipeline, AutoTokenizer
from typing import Optional
from langchain.text_splitter import CharacterTextSplitter
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Qdrant
from langchain.document_loaders import PyPDFLoader, Docx2txtLoader
app = FastAPI()
app.mount("/public", StaticFiles(directory="public"), name="public")
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
mongo_host = config.get('mongo', 'host')
qdrant_host = config.get('qdrant', 'host')
class Document(BaseModel):
filename: str
question: str
class MongoDB:
def __init__(self, uri: str, registry: Optional[str] = None):
self._client = MongoClient(host=uri)
self._registry = registry
def __getattr__(self, name):
return getattr(self._client, name)
@property
def type_registry(self):
return self._registry
def __getitem__(self, key):
db = self._client[key]
return MongoCollection(db)
class MongoCollection:
def __init__(self, db):
self._db = db
def __getattr__(self, name):
return getattr(self._db, name)
def __getitem__(self, key):
return self._db[key]
mongo_client = MongoDB(mongo_host, registry='utf-8')
ml_tasks = mongo_client['tasks']['ml_tasks']
uploads_collection = mongo_client['user_uploads']['uploads']
@app.get("/")
async def read_index_html():
with open("index.html", "r") as index_file:
content = index_file.read()
return HTMLResponse(content=content)
@app.get("/uploads")
async def get_uploads():
# retrieve all documents from the uploads collection
all_uploads = uploads_collection.find()
# convert the documents to a list and remove the '_id' field from each document
all_uploads = [upload for upload in all_uploads]
for upload in all_uploads:
upload.pop('_id', None)
# return the list of documents
return all_uploads
@app.get("/tasks")
async def get_tasks():
# retrieve all documents from the uploads collection
all_tasks = ml_tasks.find()
# convert the documents to a list and remove the '_id' field from each document
all_tasks = [task for task in all_tasks]
# return the list of documents
return all_tasks
# Text-generation pipeline used by generate_text_task below; the "gpt2" checkpoint
# is an assumed placeholder choice, not a model named elsewhere in this project.
generator = pipeline("text-generation", model="gpt2")
def generate_text_task(text: str):
generated_text = generator(text, max_length=100)
task_id = str(ObjectId())
task_result = generated_text[0]['generated_text']
print(task_result)
task = {
'_id': task_id,
'status': 'done',
'result': task_result
}
ml_tasks.insert_one(task)
return task_id
@app.post("/generate_text")
async def generate_text(text: dict, background_tasks: BackgroundTasks):
task_id = str(ObjectId())
task = {
'_id': task_id,
'status': 'processing',
'result': None
}
ml_tasks.insert_one(task)
background_tasks.add_task(generate_text_task, text['text'])
return {"task_id": task_id}
def is_valid_filetype(filename):
allowed_extensions = ['.pdf', '.docx', '.txt', '.log']
file_extension = os.path.splitext(filename)[1]
return file_extension in allowed_extensions
@app.post("/upload_file")
async def upload_file(file: UploadFile = File(...)):
if not is_valid_filetype(file.filename):
raise HTTPException(
status_code=400, detail="Invalid filetype. Allowed filetypes are: .pdf, .docx, .txt")
file_extension = os.path.splitext(file.filename)[1]
file_md5 = hashlib.md5(file.file.read()).hexdigest()
file.file.seek(0)
file_path = f"uploads/{os.path.splitext(file.filename)[0]+file_md5[0:8]}{file_extension}"
# Check if file with same md5 exists in MongoDB
existing_file = uploads_collection.find_one({'md5': file_md5})
if existing_file:
raise HTTPException(
status_code=400, detail="File with same MD5 already exists in the database")
with open(file_path, "wb") as f:
f.write(file.file.read())
file_info = {
'filename': os.path.splitext(file.filename)[0]+file_md5[0:8]+file_extension,
'md5': file_md5,
'path': file_path
}
uploads_collection.insert_one(file_info)
return {"filename": file_info["filename"], "md5": file_md5, "path": file_path}
@app.get("/task_status/{task_id}")
async def task_status(task_id: str):
task = ml_tasks.find_one({'_id': task_id})
if task is None:
return {"status": "not found"}
else:
return task
async def find_similar_documents_task(document: Document, task: str):
print(document)
embedding_model = "sentence-transformers/multi-qa-MiniLM-L6-cos-v1"
_question = document.question
_limit = 10
client = QdrantClient(url=qdrant_host+":6333")
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=250,
chunk_overlap=0,
length_function=len,
)
embeddings = HuggingFaceEmbeddings(
model_name=embedding_model
)
# Load document from file
if document.filename.split(".")[1] == 'pdf':
loader = PyPDFLoader(f'uploads/{document.filename}')
else:
loader = Docx2txtLoader(f'uploads/{document.filename}')
data = loader.load_and_split()
# Split document into pages
docs = []
for each in data:
_page_content = text_splitter.create_documents([each.page_content])
for page in _page_content:
doc = page.page_content.replace("\n", " ").replace("\t", " ")
docs.append(doc)
# _id=[str(each.metadata["page"])]
Qdrant.from_texts(
texts=docs, embedding=embeddings, host=qdrant_host, collection_name="custom_llm"
)
qdrant = Qdrant(
client=client, collection_name="custom_llm",
embeddings=embeddings.embed_query
)
query = _question
found_docs = qdrant.similarity_search_with_score(query, k=_limit)
def concat_page_content(docs):
for doc, _ in docs:
yield doc.page_content.replace("\n", "")
page_content_generator = concat_page_content(found_docs)
# Save search results to database
result = list(page_content_generator)
ml_tasks.update_one(
{'_id': task},
{'$set': {'status': 'done', 'result': result}}
)
@app.post("/find_similar_documents")
async def find_similar_documents(document: Document, background_tasks: BackgroundTasks):
print(document)
task_id = str(ObjectId())
task = {
'_id': task_id,
'status': 'processing',
'result': None
}
ml_tasks.insert_one(task)
background_tasks.add_task(find_similar_documents_task, document, task_id)
return {"task_id": task_id}
async def save_search_results(found_docs):
# TODO: Save search results to database
pass
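if __name__ == "__main__":
    # Minimal local entry point sketch; host and port are assumed defaults here,
    # not values read from config.ini like the Mongo/Qdrant hosts above.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)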
| [] |
2024-01-10 | hamzeiehsan/LLM_Heritage_Documents | LLMProcess.py | import logging
from ChromaProcess import load_local_chroma_db
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
# from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
import torch
from auto_gptq import AutoGPTQForCausalLM
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GenerationConfig,
LlamaForCausalLM,
LlamaTokenizer,
pipeline,
)
from HVConstants import (
DEVICE_TYPE,
MODEL_ID,
MODEL_BASENAME
)
# load the LLM for generating Natural Language responses
def load_model(device_type, model_id, model_basename=None):
"""
from localGPT
"""
logging.info(f"Loading Model: {model_id}, on: {device_type}")
logging.info("This action can take a few minutes!")
if model_basename is not None:
# The code supports all huggingface models that ends with GPTQ
# and have some variation of .no-act.order or .safetensors in their HF repo.
print("Using AutoGPTQForCausalLM for quantized models")
if ".safetensors" in model_basename:
# Remove the ".safetensors" ending if present
model_basename = model_basename.replace(".safetensors", "")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
logging.info("Tokenizer loaded")
model = AutoGPTQForCausalLM.from_quantized(
model_id,
# model_basename=model_basename,
use_safetensors=True,
trust_remote_code=True,
device="cuda:0",
use_triton=True,
inject_fused_mlp=False,
quantize_config=None,
)
elif (
device_type.lower() == "cuda"
): # The code supports all huggingface models that ends with -HF or which have a .bin file in their HF repo.
print("Using AutoModelForCausalLM for full models")
tokenizer = AutoTokenizer.from_pretrained(model_id)
logging.info("Tokenizer loaded")
model = AutoModelForCausalLM.from_pretrained(
model_id, device_map="auto", torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True
)
model.tie_weights()
else:
print("Using LlamaTokenizer")
tokenizer = LlamaTokenizer.from_pretrained(model_id)
model = LlamaForCausalLM.from_pretrained(model_id)
# Load configuration from the model to avoid warnings
generation_config = GenerationConfig.from_pretrained(model_id)
# see here for details:
# https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig.from_pretrained.returns
# Create a pipeline for text generation
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_length=2048,
temperature=0,
top_p=0.95,
repetition_penalty=1.15,
generation_config=generation_config,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
logging.info("Local LLM Loaded")
return local_llm
if DEVICE_TYPE == "cpu":
llm = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID)
else:
llm = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
def answer_query_from_db(query, custom_format=None, search_kwargds=None):
db = load_local_chroma_db()
if not db:
        print("No existing chroma db!")
        return None
if search_kwargds is None:
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever())
else:
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff",
retriever=db.as_retriever(search_kwargs=search_kwargds)) # filter by document
qa.return_source_documents = True
res = qa(query)
answer, docs = res["result"], res["source_documents"]
if custom_format:
answer = refine_answer_with_custom_format(answer, custom_format, llm)
return {
"answer": answer,
"source_documents": docs,
"query": query,
"custom_format": custom_format
}
def refine_answer_with_custom_format(answer, custom_format, llm):
template_string = '''
Please convert the following target text to the required format below.
target text:{answer}
required format:{custom_format}
'''
prompt = PromptTemplate.from_template(template_string)
input_ = prompt.format(answer=answer, custom_format=custom_format)
output_ = llm(input_)
return output_
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
result = answer_query_from_db("When is the constitution of freedom of speech created?",
custom_format="please present the answer in the format of YYYY:MM:DD")
print(result)
| [
"\n Please convert the following target text to the required format below.\n \n target text:{answer}\n \n required format:{custom_format}\n \n "
] |
2024-01-10 | hamzeiehsan/LLM_Heritage_Documents | ChromaProcess.py | import logging
import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
import click
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from typing import (
Iterable,
)
from HVConstants import (
DOCUMENT_MAP,
EMBEDDING_MODEL_NAME,
INGEST_THREADS,
PERSIST_DIRECTORY,
SOURCE_DIRECTORY,
DEVICE_TYPE
)
def load_single_document(file_path: str) -> Document:
# Loads a single document from a file path
file_extension = os.path.splitext(file_path)[1]
loader_class = DOCUMENT_MAP.get(file_extension)
if loader_class:
loader = loader_class(file_path)
else:
raise ValueError("Document type is undefined")
return loader.load()[0]
def load_document_batch(filepaths):
logging.info("Loading document batch")
# create a thread pool
with ThreadPoolExecutor(len(filepaths)) as exe:
# load files
futures = [exe.submit(load_single_document, name) for name in filepaths]
# collect data
data_list = [future.result() for future in futures]
# return data and file paths
return (data_list, filepaths)
def load_documents(source_dir: str) -> list[Document]:
# Loads all documents from the source documents directory
all_files = os.listdir(source_dir)
paths = []
for file_path in all_files:
file_extension = os.path.splitext(file_path)[1]
source_file_path = os.path.join(source_dir, file_path)
if file_extension in DOCUMENT_MAP.keys():
paths.append(source_file_path)
# Have at least one worker and at most INGEST_THREADS workers
n_workers = min(INGEST_THREADS, max(len(paths), 1))
    chunksize = max(1, round(len(paths) / n_workers))  # guard against a zero step when no eligible files are found
docs = []
with ProcessPoolExecutor(n_workers) as executor:
futures = []
# split the load operations into chunks
for i in range(0, len(paths), chunksize):
# select a chunk of filenames
filepaths = paths[i: (i + chunksize)]
# submit the task
future = executor.submit(load_document_batch, filepaths)
futures.append(future)
# process all results
for future in as_completed(futures):
# open the file and load the data
contents, _ = future.result()
docs.extend(contents)
return docs
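# Worked example of the batching above (illustrative numbers, not from the original file):
# with 10 eligible files and INGEST_THREADS >= 8, n_workers = 8 and chunksize = round(10 / 8) = 1,
# so the loop submits ten single-file batches to the process pool and extends `docs`
# as each future completes.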
def split_documents(documents: list[Document]) -> tuple[list[Document], list[Document]]:
# Splits documents for correct Text Splitter
text_docs, python_docs = [], []
for doc in documents:
file_extension = os.path.splitext(doc.metadata["source"])[1]
if file_extension == ".py":
python_docs.append(doc)
else:
text_docs.append(doc)
return text_docs, python_docs
@click.command()
@click.option(
"--device_type",
default=DEVICE_TYPE,
type=click.Choice(
[
"cpu",
"cuda",
"ipu",
"xpu",
"mkldnn",
"opengl",
"opencl",
"ideep",
"hip",
"ve",
"fpga",
"ort",
"xla",
"lazy",
"vulkan",
"mps",
"meta",
"hpu",
"mtia",
],
),
help="Device to run on. (Default is cuda)",
)
def main(device_type):
# Load documents and split in chunks
logging.info(f"Loading documents from {SOURCE_DIRECTORY}")
documents = load_documents(SOURCE_DIRECTORY)
text_documents, python_documents = split_documents(documents)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=1000, chunk_overlap=200
)
texts = text_splitter.split_documents(text_documents)
texts.extend(python_splitter.split_documents(python_documents))
logging.info(f"Loaded {len(documents)} documents from {SOURCE_DIRECTORY}")
logging.info(f"Split into {len(texts)} chunks of text")
# Create embeddings
embeddings = HuggingFaceInstructEmbeddings(
model_name=EMBEDDING_MODEL_NAME,
model_kwargs={"device": device_type},
)
# change the embedding type here if you are running into issues.
    # These are much smaller embeddings and will work for most applications
# If you use HuggingFaceEmbeddings, make sure to also use the same in the
# run_localGPT.py file.
# embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
db = Chroma.from_documents(
texts,
embeddings,
persist_directory=PERSIST_DIRECTORY,
)
db.persist()
db = None
def load_local_chroma_db(db_dir=None, device_type=DEVICE_TYPE):
if not db_dir:
db_dir = PERSIST_DIRECTORY
if os.path.exists(db_dir):
embeddings = HuggingFaceInstructEmbeddings(
model_name=EMBEDDING_MODEL_NAME,
model_kwargs={"device": device_type},
)
db = Chroma(persist_directory=db_dir, embedding_function=embeddings)
return db
else:
print("The directory does not exist")
def add_text_to_db(texts: Iterable[str], db_dir=None):
db = load_local_chroma_db(db_dir)
if db:
db.add_texts(texts)
print(f"uploaded {len(texts)} text chunks in chroma!")
logging.info(f"uploaded {len(texts)} text chunks in chroma!")
else:
print("No existing chroma db!")
def db_similarity_search(query, top_k=4, db_dir=None):
db = load_local_chroma_db(db_dir)
if db:
result = db.similarity_search(query, k=top_k)
return result
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
main()
# add_text_to_db(["Hellow world", "This is Bryan speaker!"])
# results = db_similarity_search("Bryan speaker!", top_k=4, db_dir=None)
# for r in results:
# print(r.page_content)
| [] |
2024-01-10 | hamzeiehsan/LLM_Heritage_Documents | HVConstants.py | import os
# from dotenv import load_dotenv
from chromadb.config import Settings
# https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/excel.html?highlight=xlsx#microsoft-excel
from langchain.document_loaders import CSVLoader, PDFMinerLoader, TextLoader, UnstructuredExcelLoader, Docx2txtLoader
# load_dotenv()
ROOT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# Define the folder for storing database
SOURCE_DIRECTORY = f"{ROOT_DIRECTORY}/data"
PERSIST_DIRECTORY = f"{ROOT_DIRECTORY}/DB"
# Can be changed to a specific number
INGEST_THREADS = os.cpu_count() or 8
# https://python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html#UnstructuredExcelLoader
DOCUMENT_MAP = {
".txt": TextLoader,
".md": TextLoader,
".py": TextLoader,
".pdf": PDFMinerLoader,
".csv": CSVLoader,
".xls": UnstructuredExcelLoader,
    ".xlsx": UnstructuredExcelLoader,
".docx": Docx2txtLoader,
".doc": Docx2txtLoader,
}
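# Illustrative sketch (not part of the original constants file; the path below is made up):
# DOCUMENT_MAP is consumed by load_single_document() in ChromaProcess.py, roughly like this.
# loader_cls = DOCUMENT_MAP.get(os.path.splitext("data/report.pdf")[1])  # -> PDFMinerLoader
# document = loader_cls("data/report.pdf").load()[0] if loader_cls else None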
# Default Instructor Model
EMBEDDING_MODEL_NAME = "hkunlp/instructor-large"
# You can also choose a smaller model, don't forget to change HuggingFaceInstructEmbeddings
# to HuggingFaceEmbeddings in both ingest.py and run_localGPT.py
# EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2"
DEVICE_TYPE = "cuda"
# for HF models
# MODEL_ID = "TheBloke/vicuna-7B-1.1-HF"
# MODEL_ID = "TheBloke/Wizard-Vicuna-7B-Uncensored-HF"
MODEL_ID = "TheBloke/guanaco-7B-HF"
# MODEL_ID = 'NousResearch/Nous-Hermes-13b' # Requires ~ 23GB VRAM.
# Using STransformers alongside will 100% create OOM on 24GB cards.
# for GPTQ (quantized) models
# MODEL_ID = "TheBloke/Nous-Hermes-13B-GPTQ"
# MODEL_BASENAME = "nous-hermes-13b-GPTQ-4bit-128g.no-act.order"
# MODEL_ID = "TheBloke/WizardLM-30B-Uncensored-GPTQ"
# MODEL_BASENAME = "WizardLM-30B-Uncensored-GPTQ-4bit.act-order.safetensors"
# Requires ~21GB VRAM. Using STransformers alongside can potentially create OOM on 24GB cards.
# MODEL_ID = "TheBloke/wizardLM-7B-GPTQ"
# MODEL_BASENAME = "wizardLM-7B-GPTQ-4bit.compat.no-act-order.safetensors"
MODEL_ID = "TheBloke/WizardLM-7B-uncensored-GPTQ"
MODEL_BASENAME = "WizardLM-7B-uncensored-GPTQ-4bit-128g.compat.no-act-order.safetensors"
| [] |
2024-01-10 | AlexHTW/chatgpt-telegram-bot | bot~telegram_bot.py | from __future__ import annotations
import asyncio
import logging
import os
from uuid import uuid4
from telegram import BotCommandScopeAllGroupChats, Update, constants
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultArticle
from telegram import InputTextMessageContent, BotCommand
from telegram.error import RetryAfter, TimedOut
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, \
filters, InlineQueryHandler, CallbackQueryHandler, Application, ContextTypes, CallbackContext
from pydub import AudioSegment
from utils import is_group_chat, get_thread_id, message_text, wrap_with_indicator, split_into_chunks, \
edit_message_with_retry, get_stream_cutoff_values, is_allowed, get_remaining_budget, is_admin, is_within_budget, \
get_reply_to_message_id, add_chat_request_to_usage_tracker, error_handler, is_direct_result, handle_direct_result, \
cleanup_intermediate_files
from openai_helper import OpenAIHelper, localized_text
from usage_tracker import UsageTracker
class ChatGPTTelegramBot:
"""
Class representing a ChatGPT Telegram Bot.
"""
def __init__(self, config: dict, openai: OpenAIHelper):
"""
Initializes the bot with the given configuration and GPT bot object.
:param config: A dictionary containing the bot configuration
:param openai: OpenAIHelper object
"""
self.config = config
self.openai = openai
bot_language = self.config['bot_language']
self.commands = [
BotCommand(command='help', description=localized_text('help_description', bot_language)),
BotCommand(command='reset', description=localized_text('reset_description', bot_language)),
BotCommand(command='stats', description=localized_text('stats_description', bot_language)),
BotCommand(command='resend', description=localized_text('resend_description', bot_language))
]
# If imaging is enabled, add the "image" command to the list
if self.config.get('enable_image_generation', False):
self.commands.append(BotCommand(command='image', description=localized_text('image_description', bot_language)))
if self.config.get('enable_tts_generation', False):
self.commands.append(BotCommand(command='tts', description=localized_text('tts_description', bot_language)))
self.group_commands = [BotCommand(
command='chat', description=localized_text('chat_description', bot_language)
)] + self.commands
self.disallowed_message = localized_text('disallowed', bot_language)
self.budget_limit_message = localized_text('budget_limit', bot_language)
self.usage = {}
self.last_message = {}
self.inline_queries_cache = {}
async def help(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
"""
Shows the help menu.
"""
commands = self.group_commands if is_group_chat(update) else self.commands
commands_description = [f'/{command.command} - {command.description}' for command in commands]
bot_language = self.config['bot_language']
help_text = (
localized_text('help_text', bot_language)[0] +
'\n\n' +
'\n'.join(commands_description) +
'\n\n' +
localized_text('help_text', bot_language)[1] +
'\n\n' +
localized_text('help_text', bot_language)[2]
)
await update.message.reply_text(help_text, disable_web_page_preview=True)
async def stats(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Returns token usage statistics for current day and month.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to request their usage statistics')
await self.send_disallowed_message(update, context)
return
logging.info(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'requested their usage statistics')
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
tokens_today, tokens_month = self.usage[user_id].get_current_token_usage()
images_today, images_month = self.usage[user_id].get_current_image_count()
(transcribe_minutes_today, transcribe_seconds_today, transcribe_minutes_month,
transcribe_seconds_month) = self.usage[user_id].get_current_transcription_duration()
characters_today, characters_month = self.usage[user_id].get_current_tts_usage()
current_cost = self.usage[user_id].get_current_cost()
chat_id = update.effective_chat.id
chat_messages, chat_token_length = self.openai.get_conversation_stats(chat_id)
remaining_budget = get_remaining_budget(self.config, self.usage, update)
bot_language = self.config['bot_language']
text_current_conversation = (
f"*{localized_text('stats_conversation', bot_language)[0]}*:\n"
f"{chat_messages} {localized_text('stats_conversation', bot_language)[1]}\n"
f"{chat_token_length} {localized_text('stats_conversation', bot_language)[2]}\n"
f"----------------------------\n"
)
# Check if image generation is enabled and, if so, generate the image statistics for today
text_today_images = ""
if self.config.get('enable_image_generation', False):
text_today_images = f"{images_today} {localized_text('stats_images', bot_language)}\n"
text_today_tts = ""
if self.config.get('enable_tts_generation', False):
text_today_tts = f"{characters_today} {localized_text('stats_tts', bot_language)}\n"
text_today = (
f"*{localized_text('usage_today', bot_language)}:*\n"
f"{tokens_today} {localized_text('stats_tokens', bot_language)}\n"
f"{text_today_images}" # Include the image statistics for today if applicable
f"{text_today_tts}"
f"{transcribe_minutes_today} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_today} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_today']:.2f}\n"
f"----------------------------\n"
)
text_month_images = ""
if self.config.get('enable_image_generation', False):
text_month_images = f"{images_month} {localized_text('stats_images', bot_language)}\n"
text_month_tts = ""
if self.config.get('enable_tts_generation', False):
text_month_tts = f"{characters_month} {localized_text('stats_tts', bot_language)}\n"
# Check if image generation is enabled and, if so, generate the image statistics for the month
text_month = (
f"*{localized_text('usage_month', bot_language)}:*\n"
f"{tokens_month} {localized_text('stats_tokens', bot_language)}\n"
f"{text_month_images}" # Include the image statistics for the month if applicable
f"{text_month_tts}"
f"{transcribe_minutes_month} {localized_text('stats_transcribe', bot_language)[0]} "
f"{transcribe_seconds_month} {localized_text('stats_transcribe', bot_language)[1]}\n"
f"{localized_text('stats_total', bot_language)}{current_cost['cost_month']:.2f}"
)
# text_budget filled with conditional content
text_budget = "\n\n"
budget_period = self.config['budget_period']
if remaining_budget < float('inf'):
text_budget += (
f"{localized_text('stats_budget', bot_language)}"
f"{localized_text(budget_period, bot_language)}: "
f"${remaining_budget:.2f}.\n"
)
# No longer works as of July 21st 2023, as OpenAI has removed the billing API
# add OpenAI account information for admin request
# if is_admin(self.config, user_id):
# text_budget += (
# f"{localized_text('stats_openai', bot_language)}"
# f"{self.openai.get_billing_current_month():.2f}"
# )
usage_text = text_current_conversation + text_today + text_month + text_budget
await update.message.reply_text(usage_text, parse_mode=constants.ParseMode.MARKDOWN)
async def resend(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resend the last request
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' is not allowed to resend the message')
await self.send_disallowed_message(update, context)
return
chat_id = update.effective_chat.id
if chat_id not in self.last_message:
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id})'
f' does not have anything to resend')
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('resend_failed', self.config['bot_language'])
)
return
# Update message text, clear self.last_message and send the request to prompt
logging.info(f'Resending the last prompt from user: {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
with update.message._unfrozen() as message:
message.text = self.last_message.pop(chat_id)
await self.prompt(update=update, context=context)
async def reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Resets the conversation.
"""
if not await is_allowed(self.config, update, context):
logging.warning(f'User {update.message.from_user.name} (id: {update.message.from_user.id}) '
f'is not allowed to reset the conversation')
await self.send_disallowed_message(update, context)
return
logging.info(f'Resetting the conversation for user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})...')
chat_id = update.effective_chat.id
reset_content = message_text(update.message)
self.openai.reset_chat_history(chat_id=chat_id, content=reset_content)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('reset_done', self.config['bot_language'])
)
async def image(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Generates an image for the given prompt using DALL·E APIs
"""
if not self.config['enable_image_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
image_query = message_text(update.message)
if image_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('image_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New image generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
image_url, image_size = await self.openai.generate_image(prompt=image_query)
if self.config['image_receive_mode'] == 'photo':
await update.effective_message.reply_photo(
reply_to_message_id=get_reply_to_message_id(self.config, update),
photo=image_url
)
elif self.config['image_receive_mode'] == 'document':
await update.effective_message.reply_document(
reply_to_message_id=get_reply_to_message_id(self.config, update),
document=image_url
)
else:
raise Exception(f"env variable IMAGE_RECEIVE_MODE has invalid value {self.config['image_receive_mode']}")
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_image_request(image_size, self.config['image_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_image_request(image_size, self.config['image_prices'])
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('image_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_PHOTO)
async def tts(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
        Generates speech for the given input using TTS APIs
"""
if not self.config['enable_tts_generation'] \
or not await self.check_allowed_and_within_budget(update, context):
return
tts_query = message_text(update.message)
if tts_query == '':
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=localized_text('tts_no_prompt', self.config['bot_language'])
)
return
logging.info(f'New speech generation request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
async def _generate():
try:
speech_file, text_length = await self.openai.generate_speech(text=tts_query)
await update.effective_message.reply_voice(
reply_to_message_id=get_reply_to_message_id(self.config, update),
voice=speech_file
)
speech_file.close()
# add image request to users usage tracker
user_id = update.message.from_user.id
self.usage[user_id].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
# add guest chat request to guest usage tracker
if str(user_id) not in self.config['allowed_user_ids'].split(',') and 'guests' in self.usage:
self.usage["guests"].add_tts_request(text_length, self.config['tts_model'], self.config['tts_prices'])
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('tts_fail', self.config['bot_language'])}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
await wrap_with_indicator(update, context, _generate, constants.ChatAction.UPLOAD_VOICE)
async def transcribe(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
Transcribe audio messages.
"""
if not self.config['enable_transcription'] or not await self.check_allowed_and_within_budget(update, context):
return
if is_group_chat(update) and self.config['ignore_group_transcriptions']:
logging.info(f'Transcription coming from group chat, ignoring...')
return
chat_id = update.effective_chat.id
filename = update.message.effective_attachment.file_unique_id
async def _execute():
filename_mp3 = f'{filename}.mp3'
bot_language = self.config['bot_language']
try:
media_file = await context.bot.get_file(update.message.effective_attachment.file_id)
await media_file.download_to_drive(filename)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=(
f"{localized_text('media_download_fail', bot_language)[0]}: "
f"{str(e)}. {localized_text('media_download_fail', bot_language)[1]}"
),
parse_mode=constants.ParseMode.MARKDOWN
)
return
try:
audio_track = AudioSegment.from_file(filename)
audio_track.export(filename_mp3, format="mp3")
logging.info(f'New transcribe request received from user {update.message.from_user.name} '
f'(id: {update.message.from_user.id})')
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=localized_text('media_type_fail', bot_language)
)
if os.path.exists(filename):
os.remove(filename)
return
user_id = update.message.from_user.id
if user_id not in self.usage:
self.usage[user_id] = UsageTracker(user_id, update.message.from_user.name)
try:
transcript = await self.openai.transcribe(filename_mp3)
transcription_price = self.config['transcription_price']
self.usage[user_id].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
allowed_user_ids = self.config['allowed_user_ids'].split(',')
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_transcription_seconds(audio_track.duration_seconds, transcription_price)
# check if transcript starts with any of the prefixes
response_to_transcription = any(transcript.lower().startswith(prefix.lower()) if prefix else False
for prefix in self.config['voice_reply_prompts'])
if self.config['voice_reply_transcript'] and not response_to_transcription:
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\""
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
else:
# Get the response of the transcript
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=transcript)
self.usage[user_id].add_chat_tokens(total_tokens, self.config['token_price'])
if str(user_id) not in allowed_user_ids and 'guests' in self.usage:
self.usage["guests"].add_chat_tokens(total_tokens, self.config['token_price'])
# Split into chunks of 4096 characters (Telegram's message limit)
transcript_output = (
f"_{localized_text('transcript', bot_language)}:_\n\"{transcript}\"\n\n"
f"_{localized_text('answer', bot_language)}:_\n{response}"
)
chunks = split_into_chunks(transcript_output)
for index, transcript_chunk in enumerate(chunks):
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update) if index == 0 else None,
text=transcript_chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('transcribe_fail', bot_language)}: {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
finally:
if os.path.exists(filename_mp3):
os.remove(filename_mp3)
if os.path.exists(filename):
os.remove(filename)
await wrap_with_indicator(update, context, _execute, constants.ChatAction.TYPING)
async def prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
"""
React to incoming messages and respond accordingly.
"""
if update.edited_message or not update.message or update.message.via_bot:
return
if not await self.check_allowed_and_within_budget(update, context):
return
logging.info(
f'New message received from user {update.message.from_user.name} (id: {update.message.from_user.id})')
chat_id = update.effective_chat.id
user_id = update.message.from_user.id
prompt = message_text(update.message)
self.last_message[chat_id] = prompt
if is_group_chat(update):
trigger_keyword = self.config['group_trigger_keyword']
if prompt.lower().startswith(trigger_keyword.lower()) or update.message.text.lower().startswith('/chat'):
if prompt.lower().startswith(trigger_keyword.lower()):
prompt = prompt[len(trigger_keyword):].strip()
if update.message.reply_to_message and \
update.message.reply_to_message.text and \
update.message.reply_to_message.from_user.id != context.bot.id:
prompt = f'"{update.message.reply_to_message.text}" {prompt}'
else:
if update.message.reply_to_message and update.message.reply_to_message.from_user.id == context.bot.id:
logging.info('Message is a reply to the bot, allowing...')
else:
logging.warning('Message does not start with trigger keyword, ignoring...')
return
try:
total_tokens = 0
if self.config['stream']:
await update.effective_message.reply_chat_action(
action=constants.ChatAction.TYPING,
message_thread_id=get_thread_id(update)
)
stream_response = self.openai.get_chat_response_stream(chat_id=chat_id, query=prompt)
i = 0
prev = ''
sent_message = None
backoff = 0
stream_chunk = 0
async for content, tokens in stream_response:
if is_direct_result(content):
return await handle_direct_result(self.config, update, content)
if len(content.strip()) == 0:
continue
stream_chunks = split_into_chunks(content)
if len(stream_chunks) > 1:
content = stream_chunks[-1]
if stream_chunk != len(stream_chunks) - 1:
stream_chunk += 1
try:
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
stream_chunks[-2])
except:
pass
try:
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=content if len(content) > 0 else "..."
)
except:
pass
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
if sent_message is not None:
await context.bot.delete_message(chat_id=sent_message.chat_id,
message_id=sent_message.message_id)
sent_message = await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=content,
)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
await edit_message_with_retry(context, chat_id, str(sent_message.message_id),
text=content, markdown=use_markdown)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _reply():
nonlocal total_tokens
response, total_tokens = await self.openai.get_chat_response(chat_id=chat_id, query=prompt)
if is_direct_result(response):
return await handle_direct_result(self.config, update, response)
# Split into chunks of 4096 characters (Telegram's message limit)
chunks = split_into_chunks(response)
for index, chunk in enumerate(chunks):
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk,
parse_mode=constants.ParseMode.MARKDOWN
)
except Exception:
try:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config,
update) if index == 0 else None,
text=chunk
)
except Exception as exception:
raise exception
await wrap_with_indicator(update, context, _reply, constants.ChatAction.TYPING)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.exception(e)
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
reply_to_message_id=get_reply_to_message_id(self.config, update),
text=f"{localized_text('chat_fail', self.config['bot_language'])} {str(e)}",
parse_mode=constants.ParseMode.MARKDOWN
)
async def inline_query(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Handle the inline query. This is run when you type: @botusername <query>
"""
query = update.inline_query.query
if len(query) < 3:
return
if not await self.check_allowed_and_within_budget(update, context, is_inline=True):
return
callback_data_suffix = "gpt:"
result_id = str(uuid4())
self.inline_queries_cache[result_id] = query
callback_data = f'{callback_data_suffix}{result_id}'
await self.send_inline_query_result(update, result_id, message_content=query, callback_data=callback_data)
async def send_inline_query_result(self, update: Update, result_id, message_content, callback_data=""):
"""
Send inline query result
"""
try:
reply_markup = None
bot_language = self.config['bot_language']
if callback_data:
reply_markup = InlineKeyboardMarkup([[
InlineKeyboardButton(text=f'🤖 {localized_text("answer_with_chatgpt", bot_language)}',
callback_data=callback_data)
]])
inline_query_result = InlineQueryResultArticle(
id=result_id,
title=localized_text("ask_chatgpt", bot_language),
input_message_content=InputTextMessageContent(message_content),
description=message_content,
thumb_url='https://user-images.githubusercontent.com/11541888/223106202-7576ff11-2c8e-408d-94ea'
'-b02a7a32149a.png',
reply_markup=reply_markup
)
await update.inline_query.answer([inline_query_result], cache_time=0)
except Exception as e:
logging.error(f'An error occurred while generating the result card for inline query {e}')
async def handle_callback_inline_query(self, update: Update, context: CallbackContext):
"""
Handle the callback query from the inline query result
"""
callback_data = update.callback_query.data
user_id = update.callback_query.from_user.id
inline_message_id = update.callback_query.inline_message_id
name = update.callback_query.from_user.name
callback_data_suffix = "gpt:"
query = ""
bot_language = self.config['bot_language']
answer_tr = localized_text("answer", bot_language)
loading_tr = localized_text("loading", bot_language)
try:
if callback_data.startswith(callback_data_suffix):
unique_id = callback_data.split(':')[1]
total_tokens = 0
# Retrieve the prompt from the cache
query = self.inline_queries_cache.get(unique_id)
if query:
self.inline_queries_cache.pop(unique_id)
else:
error_message = (
f'{localized_text("error", bot_language)}. '
f'{localized_text("try_again", bot_language)}'
)
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{error_message}',
is_inline=True)
return
unavailable_message = localized_text("function_unavailable_in_inline_mode", bot_language)
if self.config['stream']:
stream_response = self.openai.get_chat_response_stream(chat_id=user_id, query=query)
i = 0
prev = ''
backoff = 0
async for content, tokens in stream_response:
if is_direct_result(content):
cleanup_intermediate_files(content)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
if len(content.strip()) == 0:
continue
cutoff = get_stream_cutoff_values(update, content)
cutoff += backoff
if i == 0:
try:
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n{answer_tr}:\n{content}',
is_inline=True)
except:
continue
elif abs(len(content) - len(prev)) > cutoff or tokens != 'not_finished':
prev = content
try:
use_markdown = tokens != 'not_finished'
divider = '_' if use_markdown else ''
text = f'{query}\n\n{divider}{answer_tr}:{divider}\n{content}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text = text[:4096]
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text, markdown=use_markdown, is_inline=True)
except RetryAfter as e:
backoff += 5
await asyncio.sleep(e.retry_after)
continue
except TimedOut:
backoff += 5
await asyncio.sleep(0.5)
continue
except Exception:
backoff += 5
continue
await asyncio.sleep(0.01)
i += 1
if tokens != 'not_finished':
total_tokens = int(tokens)
else:
async def _send_inline_query_response():
nonlocal total_tokens
# Edit the current message to indicate that the answer is being processed
await context.bot.edit_message_text(inline_message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{loading_tr}',
parse_mode=constants.ParseMode.MARKDOWN)
logging.info(f'Generating response for inline query by {name}')
response, total_tokens = await self.openai.get_chat_response(chat_id=user_id, query=query)
if is_direct_result(response):
cleanup_intermediate_files(response)
await edit_message_with_retry(context, chat_id=None,
message_id=inline_message_id,
text=f'{query}\n\n_{answer_tr}:_\n{unavailable_message}',
is_inline=True)
return
text_content = f'{query}\n\n_{answer_tr}:_\n{response}'
# We only want to send the first 4096 characters. No chunking allowed in inline mode.
text_content = text_content[:4096]
# Edit the original message with the generated content
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=text_content, is_inline=True)
await wrap_with_indicator(update, context, _send_inline_query_response,
constants.ChatAction.TYPING, is_inline=True)
add_chat_request_to_usage_tracker(self.usage, self.config, user_id, total_tokens)
except Exception as e:
logging.error(f'Failed to respond to an inline query via button callback: {e}')
logging.exception(e)
localized_answer = localized_text('chat_fail', self.config['bot_language'])
await edit_message_with_retry(context, chat_id=None, message_id=inline_message_id,
text=f"{query}\n\n_{answer_tr}:_\n{localized_answer} {str(e)}",
is_inline=True)
async def check_allowed_and_within_budget(self, update: Update, context: ContextTypes.DEFAULT_TYPE,
is_inline=False) -> bool:
"""
Checks if the user is allowed to use the bot and if they are within their budget
:param update: Telegram update object
:param context: Telegram context object
:param is_inline: Boolean flag for inline queries
:return: Boolean indicating if the user is allowed to use the bot
"""
name = update.inline_query.from_user.name if is_inline else update.message.from_user.name
user_id = update.inline_query.from_user.id if is_inline else update.message.from_user.id
if not await is_allowed(self.config, update, context, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) is not allowed to use the bot')
await self.send_disallowed_message(update, context, is_inline)
return False
if not is_within_budget(self.config, self.usage, update, is_inline=is_inline):
logging.warning(f'User {name} (id: {user_id}) reached their usage limit')
await self.send_budget_reached_message(update, context, is_inline)
return False
return True
async def send_disallowed_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the disallowed message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.disallowed_message,
disable_web_page_preview=True
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.disallowed_message)
async def send_budget_reached_message(self, update: Update, _: ContextTypes.DEFAULT_TYPE, is_inline=False):
"""
Sends the budget reached message to the user.
"""
if not is_inline:
await update.effective_message.reply_text(
message_thread_id=get_thread_id(update),
text=self.budget_limit_message
)
else:
result_id = str(uuid4())
await self.send_inline_query_result(update, result_id, message_content=self.budget_limit_message)
async def post_init(self, application: Application) -> None:
"""
Post initialization hook for the bot.
"""
await application.bot.set_my_commands(self.group_commands, scope=BotCommandScopeAllGroupChats())
await application.bot.set_my_commands(self.commands)
def run(self):
"""
Runs the bot indefinitely until the user presses Ctrl+C
"""
application = ApplicationBuilder() \
.token(self.config['token']) \
.proxy_url(self.config['proxy']) \
.get_updates_proxy_url(self.config['proxy']) \
.post_init(self.post_init) \
.concurrent_updates(True) \
.build()
application.add_handler(CommandHandler('reset', self.reset))
application.add_handler(CommandHandler('help', self.help))
application.add_handler(CommandHandler('image', self.image))
application.add_handler(CommandHandler('tts', self.tts))
application.add_handler(CommandHandler('start', self.help))
application.add_handler(CommandHandler('stats', self.stats))
application.add_handler(CommandHandler('resend', self.resend))
application.add_handler(CommandHandler(
'chat', self.prompt, filters=filters.ChatType.GROUP | filters.ChatType.SUPERGROUP)
)
application.add_handler(MessageHandler(
filters.AUDIO | filters.VOICE | filters.Document.AUDIO |
filters.VIDEO | filters.VIDEO_NOTE | filters.Document.VIDEO,
self.transcribe))
application.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), self.prompt))
application.add_handler(InlineQueryHandler(self.inline_query, chat_types=[
constants.ChatType.GROUP, constants.ChatType.SUPERGROUP, constants.ChatType.PRIVATE
]))
application.add_handler(CallbackQueryHandler(self.handle_callback_inline_query))
application.add_error_handler(error_handler)
application.run_polling()
| [] |
2024-01-10 | profjsb/skyportal | skyportal~handlers~api~summary_query.py | import os
import copy
import yaml
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from typing import List, Optional
import pinecone
from baselayer.app.access import auth_or_token
from baselayer.app.env import load_env
from baselayer.log import make_log
from ..base import BaseHandler
from ...models import (
User,
)
_, cfg = load_env()
log = make_log('query')
# add in this new search method to the Pinecone class
def search_sources(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[dict]:
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of source dictionaries most similar to the query and score for each
"""
if namespace is None:
namespace = self._namespace
query_obj = self._embedding_function(query)
sources = []
results = self._index.query(
[query_obj],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
try:
source = {
"id": res["id"],
"score": res["score"],
"metadata": res["metadata"],
}
sources.append(source)
except Exception as e:
log(f"Error: {e}")
continue
return sources
setattr(Pinecone, 'search_sources', search_sources)
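# Hedged usage sketch (not part of the original handler; the index name and query below are
# made-up examples). After the setattr above, any Pinecone vector store built from an index
# exposes search_sources(), which the post() handler below relies on:
# docsearch = Pinecone.from_existing_index("summary-index", embeddings, text_key="summary")
# hits = docsearch.search_sources("sources associated with an NGC galaxy", k=5)
# for hit in hits:
#     print(hit["id"], hit["score"], hit["metadata"])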
# Preamble: get the embeddings and summary parameters ready
# for now, we only support pinecone embeddings
summarize_embedding_config = cfg[
'analysis_services.openai_analysis_service.embeddings_store.summary'
]
USE_PINECONE = False
if (
summarize_embedding_config.get("location") == "pinecone"
and summarize_embedding_config.get("api_key")
and summarize_embedding_config.get("environment")
and summarize_embedding_config.get("index_name")
and summarize_embedding_config.get("index_size")
):
log("initializing pinecone access...")
pinecone.init(
api_key=summarize_embedding_config.get("api_key"),
environment=summarize_embedding_config.get("environment"),
)
summarize_embedding_index_name = summarize_embedding_config.get("index_name")
summarize_embedding_index_size = summarize_embedding_config.get("index_size")
summarize_embedding_model = summarize_embedding_config.get("model")
if summarize_embedding_index_name in pinecone.list_indexes():
USE_PINECONE = True
else:
if cfg['database.database'] == 'skyportal_test':
USE_PINECONE = True
log("Setting USE_PINECONE=True as it seems like we are in a test environment")
else:
log("No valid pinecone configuration found. Please check the config file.")
summary_config = copy.deepcopy(cfg['analysis_services.openai_analysis_service.summary'])
if summary_config.get("api_key"):
# there may be a global API key set in the config file
openai_api_key = summary_config.pop("api_key")
elif os.path.exists(".secret"):
# try to get this key from the dev environment, useful for debugging
openai_api_key = yaml.safe_load(open(".secret")).get("OPENAI_API_KEY")
elif cfg['database.database'] == 'skyportal_test':
openai_api_key = "TEST_KEY"
else:
openai_api_key = None
class SummaryQueryHandler(BaseHandler):
@auth_or_token
def post(self):
"""
---
description: Get a list of sources with summaries matching the query
tags:
- summary
parameters:
- in: query
name: q
schema:
type: string
required: true
description: |
The query string. E.g. "What sources are associated with
an NGC galaxy?"
- in: query
name: k
schema:
type: int
minimum: 1
maximum: 100
description: |
Max number of sources to return. Default 5.
- in: query
name: z_min
schema:
type: float
nullable: true
description: |
Minimum redshift to consider of queries sources. If None or missing,
then no lower limit is applied.
- in: query
name: z_max
schema:
type: float
nullable: true
description: |
Maximum redshift to consider of queries sources. If None or missing,
then no upper limit is applied.
- in: query
name: classificationTypes
nullable: true
schema:
type: array
items:
type: string
description: |
List of classification types to consider. If [] or missing,
then all classification types are considered.
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
sources:
type: array
items:
$ref: '#/components/schemas/Obj'
400:
content:
application/json:
schema: Error
"""
if not USE_PINECONE:
return self.error(
"No valid pinecone configuration found. Please check your config file."
)
if not openai_api_key:
user_id = self.associated_user_object.id
with self.Session() as session:
user = session.scalars(
User.select(session.user_or_token, mode="read").where(
User.id == user_id
)
).first()
if user is None:
return self.error(
'No global OpenAI key found and cannot find user.', status=400
)
if user.preferences is not None and user.preferences.get(
"summary", {}
).get("OpenAI", {}).get('active', False):
                    user_pref_openai = user.preferences["summary"]["OpenAI"]
                    user_openai_key = user_pref_openai.get("apikey")
else:
user_openai_key = openai_api_key
if not user_openai_key:
return self.error('No OpenAI API key found.', status=400)
data = self.get_json()
query = data.get('q')
if query in [None, '']:
return self.error('Missing required query string "q"')
k = data.get('k', 5)
if k < 1 or k > 100:
return self.error('k must be 1<=k<=100')
z_min = data.get('z_min', None)
z_max = data.get('z_max', None)
if z_min is not None and z_max is not None and z_min > z_max:
return self.error('z_min must be <= z_max')
classification_types = data.get('classificationTypes', None)
# construct the filter
if z_min is not None and z_max is None:
z_filt = {"redshift": {"$gte": z_min}}
elif z_min is None and z_max is not None:
z_filt = {"redshift": {"$lte": z_max}}
elif z_min is not None and z_max is not None:
z_filt = {
"$and": [{"redshift": {"$gte": z_min}}, {"redshift": {"$lte": z_max}}]
}
else:
z_filt = None
if classification_types not in [None, []]:
class_filt = {"class": {"$in": classification_types}}
else:
class_filt = None
if class_filt is not None and z_filt is not None:
filt = {"$and": [class_filt, z_filt]}
elif class_filt is not None:
filt = class_filt
elif z_filt is not None:
filt = z_filt
else:
filt = {}
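        # Illustrative example (not in the original source): with z_min=0.01, z_max=0.1 and
        # classificationTypes=["Ia"], the filter assembled above becomes
        # {"$and": [{"class": {"$in": ["Ia"]}},
        #           {"$and": [{"redshift": {"$gte": 0.01}}, {"redshift": {"$lte": 0.1}}]}]}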
try:
embeddings = OpenAIEmbeddings(
model=summarize_embedding_model,
embedding_ctx_length=summarize_embedding_index_size,
openai_api_key=user_openai_key,
)
docsearch = Pinecone.from_existing_index(
summarize_embedding_index_name, embeddings, text_key="summary"
)
except Exception as e:
return self.error(f'Could not load embeddings or pinecone index: {e}')
# get the top k sources
try:
results = docsearch.search_sources(query, k=k, filter=filt)
except Exception as e:
return self.error(f'Could not search sources: {e}')
return self.success(data={'query_results': results})
| [] |
2024-01-10 | grill-lab/OAT | offline~augmenters~fact_augmenter.py | import openai
import torch
import os
import pandas as pd
from taskmap_pb2 import ScreenInteraction, TaskMap, ExtraInfo, ExecutionStep
from utils import logger, get_file_system, Downloader
from .abstract_step_augmenter import AbstractBatchStepAugmenter
from sentence_transformers import SentenceTransformer, util
openai.api_key = ''
class FactAugmenter(AbstractBatchStepAugmenter):
def __init__(self):
super().__init__()
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = SentenceTransformer('all-MiniLM-L6-v2', cache_folder="/shared/file_system/cache/offline/models/1_Pooling",
device=self.device).to(self.device)
artefact_id = "facts_json"
downloader = Downloader()
downloader.download([artefact_id])
self.facts_data = pd.read_json(downloader.get_artefact_path(artefact_id))
self.facts = self.facts_data['fact']
self.facts = [fact for fact in self.facts if fact != ""]
self.fact_keywords = self.facts_data['keyword']
self.fact_keywords = [word for word in self.fact_keywords if word != ""]
self.fact_embeddings = self.model.encode(self.facts, convert_to_tensor=True, batch_size=128,
show_progress_bar=True)
logger.info('Fact embeddings successfully encoded')
def batch_process(self, batch):
input_list = []
output_list = []
for (_, step, _) in batch:
step_text = step.response.speech_text
input_list.append(step_text)
step_embeddings = self.model.encode(input_list, convert_to_tensor=True, batch_size=128, show_progress_bar=True)
logger.info('Step embeddings successfully encoded')
for step_idx, step_embedding in enumerate(step_embeddings):
similarity_scores = util.cos_sim(step_embedding, self.fact_embeddings)[0]
sorted_idxs = similarity_scores.argsort(descending=True)
reranked_facts = [self.facts[i] for i in sorted_idxs]
reranked_fact_keywords = [self.fact_keywords[i] for i in sorted_idxs]
reranked_fact_scores = similarity_scores[sorted_idxs].tolist()
most_relevant_fact = None
most_relevant_score = 0
for fact, fact_keyword, score in zip(reranked_facts, reranked_fact_keywords, reranked_fact_scores):
if score > max(0.2, most_relevant_score) and fact_keyword in input_list[step_idx]:
most_relevant_fact = fact
most_relevant_score = score
if most_relevant_score > 0:
output_list.append([most_relevant_fact])
else:
output_list.append([])
return output_list
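    # Worked example of the gating above (hypothetical values): a fact whose keyword is "butter"
    # and whose cosine score against a step embedding is 0.35 is attached only if the literal
    # word "butter" also appears in that step's text and 0.35 beats max(0.2, best score so far);
    # steps with no such match get an empty list and receive no ExtraInfo.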
def condition(self, step: ExecutionStep) -> bool:
return True
def apply_output(self, step: ExecutionStep, processed_output) -> ExecutionStep:
screen = ScreenInteraction()
screen.format = ScreenInteraction.ScreenFormat.TEXT_IMAGE
for fact in processed_output:
extra_info: ExtraInfo = ExtraInfo()
extra_info.type = ExtraInfo.InfoType.FUNFACT
extra_info.text = fact
extra_info.keyword = self.facts_data[self.facts_data["fact"] == fact]['keyword'].iloc[0]
image = self.facts_data[self.facts_data["fact"] == fact]['image'].iloc[0]
if image != "":
extra_info.image_url = image
screen.extra_information.append(extra_info)
logger.info(f"Matched fact '{fact}' with step '{step.response.speech_text}'")
step.response.screen.MergeFrom(screen)
return step
def get_transformed_input(self, task_map: TaskMap):
return None
| [] |
2024-01-10 | grill-lab/OAT | offline~category_index~abstract_taxonomy_builder.py | import hashlib
import json
import os
from abc import ABC, abstractmethod
from typing import Optional
import openai
import requests
from bs4 import BeautifulSoup
from requests.models import Response
from offline_pb2 import CategoryDocument, HTMLDocument
from utils import get_file_system, logger
class AbstractTaxonomyBuilder(ABC):
def __init__(self):
self.openai_descriptions_folder = os.path.join(get_file_system(), "offline", "openai_responses")
if not os.path.isdir(self.openai_descriptions_folder):
os.makedirs(self.openai_descriptions_folder)
openai.api_key = '' # must be filled to generate descriptions
@staticmethod
def get_docid(url: str):
""" Generate document unique identifier. """
return hashlib.md5(url.encode('utf-8')).hexdigest()
@staticmethod
def get_soup(url: str, html: Optional[Response] = None) -> Optional[BeautifulSoup]:
"""
Obtains a BeautifulSoup object for the given URL.
If the `html` parameter is an existing requests
response object, the BeautifulSoup object will be
parsed directly from that. Otherwise requests.get
is used to retrieve the HTML.
Returns:
A BeautifulSoup object for the given URL, or None on error.
"""
if html is None:
logger.info(f"get_soup retrieving HTML for {url}")
try:
html = requests.get(url)
except Exception as e:
logger.warning(f"get_soup failed to retrieve HTML for {url}: {e}")
return None
try:
soup = BeautifulSoup(html.text, 'html.parser')
except Exception as e:
logger.warning(f"get_soup failed to parse HTML for {url}: {e}")
return None
return soup
@staticmethod
def clean_text(s):
""" Standard cleaning of text. """
s = s.replace('</li><li>', '. ')
return s.replace('\n', ' ').replace('\t', ' ').strip() if s is not None else s
@staticmethod
def write_out(data):
if not os.path.isdir(os.path.join(get_file_system(), "offline/category_data")):
os.makedirs(os.path.join(get_file_system(), "offline/category_data"), exist_ok=True)
with open(os.path.join(get_file_system(), "offline/category_data/cat_data.jsonl"), "w") as json_file:
for cat in data:
json.dump(cat, json_file)
json_file.write('\n')
@abstractmethod
def build_taxonomy(self, url: str, html=None) -> CategoryDocument:
"""
Used to build the different categories available:
Args:
- urls (of the categories, e.g. {"url": https://www.wikihow.com/Category:Crafts, "name": "crafts"}
Returns:
- a CategoryDocument in the following format (if translated to a dictionary):
{
'title': '',
'description': '',
'options_available': True,
questions_turns
'sub_categories': [
{'title': '',
'candidates': [],
taskmap_ids
images
'thumbnail_url': ''
},
{'title': '',
'candidates': [''],
'thumbnail_url': ''
            },
{'title': '',
'candidates': [],
'thumbnail_url': ''
}
]
}
"""
pass
def parse(self, url: str, html: HTMLDocument) -> CategoryDocument:
category_doc = self.build_taxonomy(url, html)
if category_doc and len(category_doc.sub_categories) < 3:
return None
# if category_doc and category_doc.description == "":
# category_doc.description = self.generate_description(category_doc.title)
return category_doc
def generate_description(self, title: str) -> str:
"""Matches a description to a category. If there is no available description
for the given category, the description is generated with OpenAI"""
prompts_file: str = os.path.join(self.openai_descriptions_folder, "category_responses.txt")
title_prompt_dict: dict = {}
if os.path.isfile(prompts_file):
with open(prompts_file, "r") as fp:
title_prompt_dict = json.load(fp)
if title in title_prompt_dict.keys():
return title_prompt_dict[title]
prompt: str = f"Context: You are an Alexa Skill that helps and guides user with home improvement and cooking " \
f"tasks. Tasks are divided into different categories and these categories are further divided " \
f"into subcategories. When a subcategory is selected, tasks related to this subcategory are " \
f"recommended to the user. The purpose of having categories is to be able to deal with vague " \
f"queries such by narrowing down the search results. It is important that when a category is " \
f"selected, rather than a narrowed search, the user is informed that they entered a category. " \
f"After the opening sentences, 3 subcategories are recommended, but this is not part of the " \
f"opening statement!\n\nTask: When a user chooses the category '{title}' , " \
f"you will generate an opening statement. The initial sentence should convey the user's " \
f"selection in a positive and enjoyable manner. One or two sentences should follow which " \
f"aim to hype how great this category is, by mentioning interesting facts about it. Finally, " \
f"the last sentence must inform the user that there are several subcategories available " \
f"for them to explore. And choose from, but do not provide any examples."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=100,
messages=[
{"role": "system", "content": prompt}
])
description: str = completion['choices'][0]['message']['content']
title_prompt_dict[title] = description
logger.info(f"For category '{title}' generated description: '{description}'")
with open(prompts_file, "w") as fp:
json.dump(title_prompt_dict, fp, indent=2)
return description
| [
"category_responses.txt",
"Context: You are an Alexa Skill that helps and guides user with home improvement and cooking tasks. Tasks are divided into different categories and these categories are further divided into subcategories. When a subcategory is selected, tasks related to this subcategory are recommended to the user. The purpose of having categories is to be able to deal with vague queries such by narrowing down the search results. It is important that when a category is selected, rather than a narrowed search, the user is informed that they entered a category. After the opening sentences, 3 subcategories are recommended, but this is not part of the opening statement!\n\nTask: When a user chooses the category 'PLACEHOLDER' , you will generate an opening statement. The initial sentence should convey the user's selection in a positive and enjoyable manner. One or two sentences should follow which aim to hype how great this category is, by mentioning interesting facts about it. Finally, the last sentence must inform the user that there are several subcategories available for them to explore. And choose from, but do not provide any examples.",
"{}"
] |
2024-01-10 | grill-lab/OAT | offline~augmenters~joke_augmenter.py | import openai
import torch
import os
import pandas as pd
from taskmap_pb2 import ScreenInteraction, TaskMap, ExtraInfo, ExecutionStep
from utils import logger, get_file_system, Downloader
from .abstract_step_augmenter import AbstractBatchStepAugmenter
from sentence_transformers import SentenceTransformer, util
openai.api_key = ''
class JokeAugmenter(AbstractBatchStepAugmenter):
def __init__(self):
super().__init__()
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model = SentenceTransformer('all-MiniLM-L6-v2', cache_folder="/shared/file_system/cache/offline/models/1_Pooling",
device=self.device).to(self.device)
artefact_id = "jokes_json"
downloader = Downloader()
downloader.download([artefact_id])
self.jokes_data = pd.read_json(downloader.get_artefact_path(artefact_id))
self.jokes = self.jokes_data['joke']
self.jokes = [joke for joke in self.jokes if joke != ""]
self.joke_keywords = self.jokes_data['keyword']
self.joke_keywords = [word for word in self.joke_keywords if word != ""]
self.joke_embeddings = self.model.encode(self.jokes, convert_to_tensor=True,
batch_size=128, show_progress_bar=True)
logger.info('Joke embeddings successfully encoded')
def batch_process(self, batch):
input_list = []
output_list = []
for (_, step, _) in batch:
step_text = step.response.speech_text
input_list.append(step_text)
step_embeddings = self.model.encode(input_list, convert_to_tensor=True,
batch_size=128, show_progress_bar=True)
logger.info('Step embeddings successfully encoded')
for step_idx, step in enumerate(zip(input_list, step_embeddings)):
step_text, step_embedding = step
similarity_scores = util.cos_sim(step_embedding, self.joke_embeddings)[0]
sorted_idxs = similarity_scores.argsort(descending=True)
reranked_jokes = [self.jokes[i] for i in sorted_idxs]
reranked_joke_keywords = [self.joke_keywords[i] for i in sorted_idxs]
reranked_joke_scores = similarity_scores[sorted_idxs].tolist()
most_relevant_joke = None
most_relevant_score = 0
for joke, keyword, score in zip(reranked_jokes, reranked_joke_keywords, reranked_joke_scores):
if score > max(0.2, most_relevant_score) and keyword in input_list[step_idx]:
most_relevant_joke = joke
most_relevant_score = score
if most_relevant_score > 0:
output_list.append([most_relevant_joke])
else:
output_list.append([])
return output_list
def condition(self, step: ExecutionStep) -> bool:
potential_keywords = step.response.speech_text.strip().lower().split(" ")
matched_keywords = self.jokes_data[self.jokes_data['keyword'].isin(potential_keywords)]['keyword'].tolist()
return len(matched_keywords) > 0
def apply_output(self, step: ExecutionStep, processed_output) -> ExecutionStep:
screen = ScreenInteraction()
screen.format = ScreenInteraction.ScreenFormat.TEXT_IMAGE
for joke in processed_output:
extra_info = ExtraInfo(
type='JOKE',
text=joke,
keyword=self.jokes_data[self.jokes_data["joke"] == joke]['keyword'].iloc[0]
)
image = self.jokes_data[self.jokes_data["joke"] == joke]['image'].iloc[0]
if image != "":
extra_info.image_url = image
screen.extra_information.append(extra_info)
logger.info(f"Matched joke '{joke}' with step '{step.response.speech_text}'")
step.response.screen.MergeFrom(screen)
return step
def get_transformed_input(self, task_map: TaskMap):
return None
# def generate_jokes_from_step(self, step_text):
# prompt = f"""Tell me a clean joke that is related to {step_text} and is appropriate for children"""
#
# response = openai.Completion.create(
# engine="text-davinci-002",
# prompt=prompt,
# max_tokens=50,
# top_p=1,
# frequency_penalty=0.2,
# presence_penalty=0.5,
# temperature=0.5
# )
# generator = JokeGenerator()
# generator.add_joke(response.choices[0].text)
# joke = response.choices[0].text
#
# # with open(os.path.join(get_file_system(), 'offline/extra_info_data/jokes.txt'), 'a') as file:
# # file.append(joke + '\n')
#
# # logger.info("GENERATING JOKE" + kw + str(response.choices[0].text))
#
# return joke
| [] |
2024-01-10 | platisd/openai-pr-description | autofill_description.py | #!/usr/bin/env python3
import sys
import requests
import argparse
import json
import openai
import os
SAMPLE_PROMPT = """
Write a pull request description focusing on the motivation behind the change and why it improves the project.
Go straight to the point.
The title of the pull request is "Enable valgrind on CI" and the following changes took place:
Changes in file .github/workflows/build-ut-coverage.yml: @@ -24,6 +24,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y lcov
+ sudo apt-get install -y valgrind
sudo apt-get install -y ${{ matrix.compiler.cc }}
sudo apt-get install -y ${{ matrix.compiler.cxx }}
- name: Checkout repository
@@ -48,3 +49,7 @@ jobs:
with:
files: coverage.info
fail_ci_if_error: true
+ - name: Run valgrind
+ run: |
+ valgrind --tool=memcheck --leak-check=full --leak-resolution=med \
+ --track-origins=yes --vgdb=no --error-exitcode=1 ${build_dir}/test/command_parser_test
Changes in file test/CommandParserTest.cpp: @@ -566,7 +566,7 @@ TEST(CommandParserTest, ParsedCommandImpl_WhenArgumentIsSupportedNumericTypeWill
unsigned long long expectedUnsignedLongLong { std::numeric_limits<unsigned long long>::max() };
float expectedFloat { -164223.123f }; // std::to_string does not play well with floating point min()
double expectedDouble { std::numeric_limits<double>::max() };
- long double expectedLongDouble { std::numeric_limits<long double>::max() };
+ long double expectedLongDouble { 123455678912349.1245678912349L };
auto command = UnparsedCommand::create(expectedCommand, "dummyDescription"s)
.withArgs<int, long, unsigned long, long long, unsigned long long, float, double, long double>();
"""
GOOD_SAMPLE_RESPONSE = """
Currently, our CI build does not include Valgrind as part of the build and test process. Valgrind is a powerful tool for detecting memory errors, and its use is essential for maintaining the integrity of our project.
This pull request adds Valgrind to the CI build, so that any memory errors will be detected and reported immediately. This will help to prevent undetected memory errors from making it into the production build.
Overall, this change will improve the quality of the project by helping us detect and prevent memory errors.
"""
def main():
parser = argparse.ArgumentParser(
description="Use ChatGPT to generate a description for a pull request."
)
parser.add_argument(
"--github-api-url", type=str, required=True, help="The GitHub API URL"
)
parser.add_argument(
"--github-repository", type=str, required=True, help="The GitHub repository"
)
parser.add_argument(
"--pull-request-id",
type=int,
required=True,
help="The pull request ID",
)
parser.add_argument(
"--github-token",
type=str,
required=True,
help="The GitHub token",
)
parser.add_argument(
"--openai-api-key",
type=str,
required=True,
help="The OpenAI API key",
)
parser.add_argument(
"--allowed-users",
type=str,
required=False,
help="A comma-separated list of GitHub usernames that are allowed to trigger the action, empty or missing means all users are allowed",
)
args = parser.parse_args()
github_api_url = args.github_api_url
repo = args.github_repository
github_token = args.github_token
pull_request_id = args.pull_request_id
openai_api_key = args.openai_api_key
allowed_users = os.environ.get("INPUT_ALLOWED_USERS", "")
if allowed_users:
allowed_users = allowed_users.split(",")
open_ai_model = os.environ.get("INPUT_OPENAI_MODEL", "gpt-3.5-turbo")
max_prompt_tokens = int(os.environ.get("INPUT_MAX_TOKENS", "1000"))
model_temperature = float(os.environ.get("INPUT_TEMPERATURE", "0.6"))
model_sample_prompt = os.environ.get("INPUT_MODEL_SAMPLE_PROMPT", SAMPLE_PROMPT)
model_sample_response = os.environ.get(
"INPUT_MODEL_SAMPLE_RESPONSE", GOOD_SAMPLE_RESPONSE
)
authorization_header = {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token %s" % github_token,
}
pull_request_url = f"{github_api_url}/repos/{repo}/pulls/{pull_request_id}"
pull_request_result = requests.get(
pull_request_url,
headers=authorization_header,
)
if pull_request_result.status_code != requests.codes.ok:
print(
"Request to get pull request data failed: "
+ str(pull_request_result.status_code)
)
return 1
pull_request_data = json.loads(pull_request_result.text)
if pull_request_data["body"]:
print("Pull request already has a description, skipping")
return 0
if allowed_users:
pr_author = pull_request_data["user"]["login"]
if pr_author not in allowed_users:
print(
f"Pull request author {pr_author} is not allowed to trigger this action"
)
return 0
pull_request_title = pull_request_data["title"]
pull_request_files = []
# Request a maximum of 10 pages (300 files)
for page_num in range(1, 11):
pull_files_url = f"{pull_request_url}/files?page={page_num}&per_page=30"
pull_files_result = requests.get(
pull_files_url,
headers=authorization_header,
)
if pull_files_result.status_code != requests.codes.ok:
print(
"Request to get list of files failed with error code: "
+ str(pull_files_result.status_code)
)
return 1
pull_files_chunk = json.loads(pull_files_result.text)
if len(pull_files_chunk) == 0:
break
pull_request_files.extend(pull_files_chunk)
completion_prompt = f"""
Write a pull request description focusing on the motivation behind the change and why it improves the project.
Go straight to the point.
The title of the pull request is "{pull_request_title}" and the following changes took place: \n
"""
for pull_request_file in pull_request_files:
# Not all PR file metadata entries may contain a patch section
# For example, entries related to removed binary files may not contain it
if "patch" not in pull_request_file:
continue
filename = pull_request_file["filename"]
patch = pull_request_file["patch"]
completion_prompt += f"Changes in file {filename}: {patch}\n"
max_allowed_tokens = 2048 # 4096 is the maximum allowed by OpenAI for GPT-3.5
characters_per_token = 4 # The average number of characters per token
max_allowed_characters = max_allowed_tokens * characters_per_token
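    # Example with the defaults above: 2048 tokens * 4 characters/token = 8192 characters,
    # so everything past roughly the first 8192 characters of the assembled diff prompt is dropped below.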
if len(completion_prompt) > max_allowed_characters:
completion_prompt = completion_prompt[:max_allowed_characters]
openai.api_key = openai_api_key
openai_response = openai.ChatCompletion.create(
model=open_ai_model,
messages=[
{
"role": "system",
"content": "You are a helpful assistant who writes pull request descriptions",
},
{"role": "user", "content": model_sample_prompt},
{"role": "assistant", "content": model_sample_response},
{"role": "user", "content": completion_prompt},
],
temperature=model_temperature,
max_tokens=max_prompt_tokens,
)
generated_pr_description = openai_response.choices[0].message.content
redundant_prefix = "This pull request "
if generated_pr_description.startswith(redundant_prefix):
generated_pr_description = generated_pr_description[len(redundant_prefix) :]
generated_pr_description = (
generated_pr_description[0].upper() + generated_pr_description[1:]
)
print(f"Generated pull request description: '{generated_pr_description}'")
issues_url = "%s/repos/%s/issues/%s" % (
github_api_url,
repo,
pull_request_id,
)
update_pr_description_result = requests.patch(
issues_url,
headers=authorization_header,
json={"body": generated_pr_description},
)
if update_pr_description_result.status_code != requests.codes.ok:
print(
"Request to update pull request description failed: "
+ str(update_pr_description_result.status_code)
)
print("Response: " + update_pr_description_result.text)
return 1
if __name__ == "__main__":
sys.exit(main())
| [
"Changes in file PLACEHOLDER: PLACEHOLDER\n",
"\nWrite a pull request description focusing on the motivation behind the change and why it improves the project.\nGo straight to the point.\n\nThe title of the pull request is \"Enable valgrind on CI\" and the following changes took place: \n\nChanges in file .github/workflows/build-ut-coverage.yml: @@ -24,6 +24,7 @@ jobs:\n run: |\n sudo apt-get update\n sudo apt-get install -y lcov\n+ sudo apt-get install -y valgrind\n sudo apt-get install -y ${{ matrix.compiler.cc }}\n sudo apt-get install -y ${{ matrix.compiler.cxx }}\n - name: Checkout repository\n@@ -48,3 +49,7 @@ jobs:\n with:\n files: coverage.info\n fail_ci_if_error: true\n+ - name: Run valgrind\n+ run: |\n+ valgrind --tool=memcheck --leak-check=full --leak-resolution=med + --track-origins=yes --vgdb=no --error-exitcode=1 ${build_dir}/test/command_parser_test\nChanges in file test/CommandParserTest.cpp: @@ -566,7 +566,7 @@ TEST(CommandParserTest, ParsedCommandImpl_WhenArgumentIsSupportedNumericTypeWill\n unsigned long long expectedUnsignedLongLong { std::numeric_limits<unsigned long long>::max() };\n float expectedFloat { -164223.123f }; // std::to_string does not play well with floating point min()\n double expectedDouble { std::numeric_limits<double>::max() };\n- long double expectedLongDouble { std::numeric_limits<long double>::max() };\n+ long double expectedLongDouble { 123455678912349.1245678912349L };\n \n auto command = UnparsedCommand::create(expectedCommand, \"dummyDescription\"s)\n .withArgs<int, long, unsigned long, long long, unsigned long long, float, double, long double>();\n",
"You are a helpful assistant who writes pull request descriptions",
"INPUT_MODEL_SAMPLE_PROMPT",
"\nWrite a pull request description focusing on the motivation behind the change and why it improves the project.\nGo straight to the point.\n\nThe title of the pull request is \"PLACEHOLDER\" and the following changes took place: \n\n",
"INPUT_MAX_TOKENS"
] |
2024-01-10 | dhruvyad/super-mario-ai | ai~llm_backend.py | from langchain.agents import initialize_agent, AgentType, Tool
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
# iterate over image assets and transform them given the prompt
def game_visual_updater(prompt):
pass
# return a 2D array consisting of elements that can be used to create a Mario level
def level_creator(prompt):
pass
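# Purely illustrative sketch (not the author's implementation): a level_creator result could be a
# grid of tile codes consumed by the game engine, e.g.
#   [[0, 0, 0, 0],
#    [0, 0, 2, 0],
#    [1, 1, 1, 1]]
# where 0 = empty space, 1 = ground block and 2 = enemy are hypothetical encodings.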
tools = [
Tool(
name = "Update Game",
func=game_visual_updater,
description="useful for when you need to update the visual details of the video game. You should define the ways in which you need to update the game."
),
Tool(
name="Create Level",
func=level_creator,
description="useful for when you need to create a new level for the video game."
),
]
main_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)
def response_function(prompt, history):
# main_agent.run(prompt)
return "Hi." | [] |
2024-01-10 | lifan-yuan/CRAFT | vqa~vision_models.py | """
Adding new functionality is easy. Just implement your new model as a subclass of BaseModel.
The code will do the rest: it will make it available for the processes to call by using
process(name, *args, **kwargs), where *args and **kwargs are the arguments of the model's process() method.
"""
import abc
import backoff
import contextlib
import openai
import os
import re
import timeit
import torch
import torchvision
import warnings
from PIL import Image
from collections import Counter
from contextlib import redirect_stdout
from functools import partial
from itertools import chain
from joblib import Memory
from rich.console import Console
from torch import hub
from torch.nn import functional as F
from torchvision import transforms
from typing import List, Union
import json
from configs import config
from vipergpt_utils import HiddenPrints
import requests
import time
cache = Memory('cache/' if config.use_cache else None, verbose=0)
device = "cuda" if torch.cuda.is_available() else "cpu"
console = Console(highlight=False)
HiddenPrints = partial(HiddenPrints, console=console, use_newline=config.multiprocessing)
with open("api.key") as f:
CANDIDATE_API = f.readlines()
CANDIDATE_API = [api.strip() for api in CANDIDATE_API]
count = 0
api_num = len(CANDIDATE_API)
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
# --------------------------- Base abstract model --------------------------- #
class BaseModel(abc.ABC):
to_batch = False
seconds_collect_data = 1.5 # Window of seconds to group inputs, if to_batch is True
max_batch_size = 10 # Maximum batch size, if to_batch is True. Maximum allowed by OpenAI
requires_gpu = True
def __init__(self, gpu_number):
self.dev = f'cuda:{gpu_number}' if device == 'cuda' else device
@abc.abstractmethod
def forward(self, *args, **kwargs):
"""
If to_batch is True, every arg and kwarg will be a list of inputs, and the output should be a list of outputs.
        Because of how this is implemented in the background, inputs with defaults that are not
        specified will take the default value, but will still be passed to forward() as a list.
"""
pass
@classmethod
@abc.abstractmethod
def name(cls) -> str:
"""The name of the model has to be given by the subclass"""
pass
@classmethod
def list_processes(cls):
"""
A single model can be run in multiple processes, for example if there are different tasks to be done with it.
If multiple processes are used, override this method to return a list of strings.
Remember the @classmethod decorator.
If we specify a list of processes, the self.forward() method has to have a "process_name" parameter that gets
automatically passed in.
See GPT3Model for an example.
"""
return [cls.name]
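# Illustrative sketch of the multi-process pattern described in list_processes() above
# (hypothetical names; GPT3Model further down is the real in-file example):
#
#   class SummarizerModel(BaseModel):
#       name = 'summarizer'
#
#       def forward(self, text, process_name):
#           # process_name is passed in automatically and tells us which variant was requested
#           return text[:50] if process_name == 'summarizer_short' else text[:200]
#
#       @classmethod
#       def list_processes(cls):
#           return ['summarizer_short', 'summarizer_long']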
# ------------------------------ Specific models ---------------------------- #
class ObjectDetector(BaseModel):
name = 'object_detector'
def __init__(self, gpu_number=0):
super().__init__(gpu_number)
with HiddenPrints('ObjectDetector'):
detection_model = hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True).to(self.dev)
detection_model.eval()
self.detection_model = detection_model
@torch.no_grad()
def forward(self, image: torch.Tensor):
"""get_object_detection_bboxes"""
input_batch = image.to(self.dev).unsqueeze(0) # create a mini-batch as expected by the model
detections = self.detection_model(input_batch)
p = detections['pred_boxes']
p = torch.stack([p[..., 0], 1 - p[..., 3], p[..., 2], 1 - p[..., 1]], -1) # [left, lower, right, upper]
detections['pred_boxes'] = p
return detections
class DepthEstimationModel(BaseModel):
name = 'depth'
def __init__(self, gpu_number=0, model_type='DPT_Large'):
super().__init__(gpu_number)
with HiddenPrints('DepthEstimation'):
warnings.simplefilter("ignore")
# Model options: MiDaS_small, DPT_Hybrid, DPT_Large
torch_cache_dir = "/yangyic3/.cache/torch/hub"
# depth_estimation_model = hub.load(os.path.join(torch_cache_dir, 'intel-isl_MiDaS_master'), model_type, source="local", pretrained=True).to(self.dev)
depth_estimation_model = hub.load('intel-isl/MiDaS', model_type, pretrained=True).to(self.dev)
depth_estimation_model.eval()
# midas_transforms = torch.hub.load(os.path.join(torch_cache_dir, 'intel-isl_MiDaS_master'), "transforms", source="local")
midas_transforms = torch.hub.load('intel-isl/MiDaS', "transforms")
if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
self.transform = midas_transforms.dpt_transform
else:
self.transform = midas_transforms.small_transform
self.depth_estimation_model = depth_estimation_model
@torch.no_grad()
def forward(self, image: torch.Tensor):
"""Estimate depth map"""
image_numpy = image.cpu().permute(1, 2, 0).numpy() * 255
input_batch = self.transform(image_numpy).to(self.dev)
prediction = self.depth_estimation_model(input_batch)
# Resize to original size
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=image_numpy.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
# We compute the inverse because the model returns inverse depth
to_return = 1 / prediction
to_return = to_return.cpu().numpy()
return to_return # To save: plt.imsave(path_save, prediction.cpu().numpy())
class CLIPModel(BaseModel):
name = 'clip'
def __init__(self, gpu_number=0, version="ViT-L/14@336px"): # @336px
super().__init__(gpu_number)
import clip
self.clip = clip
with HiddenPrints('CLIP'):
model, preprocess = clip.load(version, device=self.dev)
model.eval()
model.requires_grad_ = False
self.model = model
self.negative_text_features = None
self.transform = self.get_clip_transforms_from_tensor(336 if "336" in version else 224)
# @staticmethod
def _convert_image_to_rgb(self, image):
return image.convert("RGB")
# @staticmethod
def get_clip_transforms_from_tensor(self, n_px=336):
return transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(n_px, interpolation=transforms.InterpolationMode.BICUBIC),
transforms.CenterCrop(n_px),
self._convert_image_to_rgb,
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
@torch.no_grad()
def binary_score(self, image: torch.Tensor, prompt, negative_categories=None):
is_video = isinstance(image, torch.Tensor) and image.ndim == 4
if is_video: # video
image = torch.stack([self.transform(image[i]) for i in range(image.shape[0])], dim=0)
else:
image = self.transform(image).unsqueeze(0).to(self.dev)
prompt_prefix = "photo of "
prompt = prompt_prefix + prompt
if negative_categories is None:
if self.negative_text_features is None:
self.negative_text_features = self.clip_negatives(prompt_prefix)
negative_text_features = self.negative_text_features
else:
negative_text_features = self.clip_negatives(prompt_prefix, negative_categories)
text = self.clip.tokenize([prompt]).to(self.dev)
image_features = self.model.encode_image(image.to(self.dev))
image_features = F.normalize(image_features, dim=-1)
pos_text_features = self.model.encode_text(text)
pos_text_features = F.normalize(pos_text_features, dim=-1)
text_features = torch.concat([pos_text_features, negative_text_features], axis=0)
# run competition where we do a binary classification
# between the positive and all the negatives, then take the mean
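        # Concretely: sim[0] is the positive prompt's similarity and sim[1:] are the negatives; each
        # positive/negative pair is softmaxed and the positive's win probabilities are averaged.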
sim = (100.0 * image_features @ text_features.T).squeeze(dim=0)
if is_video:
query = sim[..., 0].unsqueeze(-1).broadcast_to(sim.shape[0], sim.shape[-1] - 1)
others = sim[..., 1:]
res = F.softmax(torch.stack([query, others], dim=-1), dim=-1)[..., 0].mean(-1)
else:
res = F.softmax(torch.cat((sim[0].broadcast_to(1, sim.shape[0] - 1),
sim[1:].unsqueeze(0)), dim=0), dim=0)[0].mean()
return res
@torch.no_grad()
def clip_negatives(self, prompt_prefix, negative_categories=None):
if negative_categories is None:
with open('useful_lists/random_negatives.txt') as f:
negative_categories = [x.strip() for x in f.read().split()]
# negative_categories = negative_categories[:1000]
# negative_categories = ["a cat", "a lamp"]
negative_categories = [prompt_prefix + x for x in negative_categories]
negative_tokens = self.clip.tokenize(negative_categories).to(self.dev)
negative_text_features = self.model.encode_text(negative_tokens)
negative_text_features = F.normalize(negative_text_features, dim=-1)
return negative_text_features
@torch.no_grad()
def classify(self, image: Union[torch.Tensor, list], categories: List[str], return_index=True):
is_list = isinstance(image, list)
if is_list:
assert len(image) == len(categories)
image = [self.transform(x).unsqueeze(0) for x in image]
image_clip = torch.cat(image, dim=0).to(self.dev)
elif len(image.shape) == 3:
image_clip = self.transform(image).to(self.dev).unsqueeze(0)
else: # Video (process images separately)
image_clip = torch.stack([self.transform(x) for x in image], dim=0).to(self.dev)
# if len(image_clip.shape) == 3:
# image_clip = image_clip.unsqueeze(0)
prompt_prefix = "photo of "
categories = [prompt_prefix + x for x in categories]
categories = self.clip.tokenize(categories).to(self.dev)
text_features = self.model.encode_text(categories)
text_features = F.normalize(text_features, dim=-1)
image_features = self.model.encode_image(image_clip)
image_features = F.normalize(image_features, dim=-1)
if image_clip.shape[0] == 1:
# get category from image
softmax_arg = image_features @ text_features.T # 1 x n
else:
if is_list:
# get highest category-image match with n images and n corresponding categories
softmax_arg = (image_features @ text_features.T).diag().unsqueeze(0) # n x n -> 1 x n
else:
softmax_arg = (image_features @ text_features.T)
similarity = (100.0 * softmax_arg).softmax(dim=-1).squeeze(0)
if not return_index:
return similarity
else:
result = torch.argmax(similarity, dim=-1)
if result.shape == ():
result = result.item()
return result
@torch.no_grad()
def compare(self, images: List[torch.Tensor], prompt, return_scores=False):
images = [self.transform(im).unsqueeze(0).to(self.dev) for im in images]
images = torch.cat(images, dim=0)
prompt_prefix = "photo of "
prompt = prompt_prefix + prompt
text = self.clip.tokenize([prompt]).to(self.dev)
image_features = self.model.encode_image(images.to(self.dev))
image_features = F.normalize(image_features, dim=-1)
text_features = self.model.encode_text(text)
text_features = F.normalize(text_features, dim=-1)
sim = (image_features @ text_features.T).squeeze(dim=-1) # Only one text, so squeeze
if return_scores:
return sim
res = sim.argmax()
return res
def forward(self, image, prompt, task='score', return_index=True, negative_categories=None, return_scores=False):
if task == 'classify':
categories = prompt
clip_sim = self.classify(image, categories, return_index=return_index)
out = clip_sim
elif task == 'score':
clip_score = self.binary_score(image, prompt, negative_categories=negative_categories)
out = clip_score
else: # task == 'compare'
idx = self.compare(image, prompt, return_scores)
out = idx
if not isinstance(out, int):
out = out.cpu()
return out
class MaskRCNNModel(BaseModel):
name = 'maskrcnn'
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.maskrcnn):
super().__init__(gpu_number)
with HiddenPrints('MaskRCNN'):
obj_detect = torchvision.models.detection.maskrcnn_resnet50_fpn_v2(weights='COCO_V1').to(self.dev)
# obj_detect = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).to(self.dev)
obj_detect.eval()
obj_detect.requires_grad_(False)
self.categories = torchvision.models.detection.MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1.meta['categories']
self.obj_detect = obj_detect
self.threshold = threshold
def prepare_image(self, image):
image = image.to(self.dev)
return image
@torch.no_grad()
def detect(self, images: torch.Tensor, return_labels=True):
if type(images) != list:
images = [images]
images = [self.prepare_image(im) for im in images]
detections = self.obj_detect(images)
for i in range(len(images)):
height = detections[i]['masks'].shape[-2]
# Just return boxes (no labels no masks, no scores) with scores > threshold
if return_labels: # In the current implementation, we only return labels
d_i = detections[i]['labels'][detections[i]['scores'] > self.threshold]
detections[i] = set([self.categories[d] for d in d_i])
else:
d_i = detections[i]['boxes'][detections[i]['scores'] > self.threshold]
# Return [left, lower, right, upper] instead of [left, upper, right, lower]
detections[i] = torch.stack([d_i[:, 0], height - d_i[:, 3], d_i[:, 2], height - d_i[:, 1]], dim=1)
return detections
def forward(self, image, return_labels=False):
obj_detections = self.detect(image, return_labels)
# Move to CPU before sharing. Alternatively we can try cloning tensors in CUDA, but may not work
obj_detections = [(v.to('cpu') if isinstance(v, torch.Tensor) else list(v)) for v in obj_detections]
return obj_detections
class OwlViTModel(BaseModel):
name = 'owlvit'
def __init__(self, gpu_number=0, threshold=config.detect_thresholds.owlvit):
super().__init__(gpu_number)
from transformers import OwlViTProcessor, OwlViTForObjectDetection
with HiddenPrints("OwlViT"):
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
model.eval()
model.requires_grad_(False)
self.model = model.to(self.dev)
self.processor = processor
self.threshold = threshold
@torch.no_grad()
def forward(self, image: torch.Tensor, text: List[str], return_labels: bool = False):
if isinstance(image, list):
raise TypeError("image has to be a torch tensor, not a list")
if isinstance(text, str):
text = [text]
text_original = text
text = ['a photo of a ' + t for t in text]
inputs = self.processor(text=text, images=image, return_tensors="pt") # padding="longest",
inputs = {k: v.to(self.dev) for k, v in inputs.items()}
outputs = self.model(**inputs)
# Target image sizes (height, width) to rescale box predictions [batch_size, 2]
target_sizes = torch.tensor([image.shape[1:]]).to(self.dev)
# Convert outputs (bounding boxes and class logits) to COCO API
results = self.processor.post_process(outputs=outputs, target_sizes=target_sizes)
boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]
indices_good = scores > self.threshold
boxes = boxes[indices_good]
# Change to format where large "upper"/"lower" means more up
left, upper, right, lower = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
height = image.shape[-2]
boxes = torch.stack([left, height - lower, right, height - upper], -1)
if return_labels:
labels = labels[indices_good]
            labels = [text_original[lab] for lab in labels]
return boxes, labels
return boxes.cpu() # [x_min, y_min, x_max, y_max]
class GLIPModel(BaseModel):
name = 'glip'
def __init__(self, model_size='large', gpu_number=0, *args):
BaseModel.__init__(self, gpu_number)
with contextlib.redirect_stderr(open(os.devnull, "w")): # Do not print nltk_data messages when importing
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo, to_image_list, create_positive_map, \
create_positive_map_label_to_token_from_positive_map
working_dir = f'{config.path_pretrained_models}/GLIP/'
if model_size == 'tiny':
config_file = working_dir + "configs/glip_Swin_T_O365_GoldG.yaml"
weight_file = working_dir + "checkpoints/glip_tiny_model_o365_goldg_cc_sbu.pth"
else: # large
config_file = working_dir + "configs/glip_Swin_L.yaml"
weight_file = working_dir + "checkpoints/glip_large_model.pth"
class OurGLIPDemo(GLIPDemo):
def __init__(self, dev, *args_demo):
kwargs = {
'min_image_size': 800,
'confidence_threshold': config.detect_thresholds.glip,
'show_mask_heatmaps': False
}
self.dev = dev
from maskrcnn_benchmark.config import cfg
# manual override some options
cfg.local_rank = 0
cfg.num_gpus = 1
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.WEIGHT", weight_file])
cfg.merge_from_list(["MODEL.DEVICE", self.dev])
with HiddenPrints("GLIP"), torch.cuda.device(self.dev):
from transformers.utils import logging
logging.set_verbosity_error()
GLIPDemo.__init__(self, cfg, *args_demo, **kwargs)
if self.cfg.MODEL.RPN_ARCHITECTURE == "VLDYHEAD":
plus = 1
else:
plus = 0
self.plus = plus
self.color = 255
@torch.no_grad()
def compute_prediction(self, original_image, original_caption, custom_entity=None):
image = self.transforms(original_image)
# image = [image, image.permute(0, 2, 1)]
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.dev)
# caption
if isinstance(original_caption, list):
if len(original_caption) > 40:
all_predictions = None
for loop_num, i in enumerate(range(0, len(original_caption), 40)):
list_step = original_caption[i:i + 40]
prediction_step = self.compute_prediction(original_image, list_step, custom_entity=None)
if all_predictions is None:
all_predictions = prediction_step
else:
# Aggregate predictions
all_predictions.bbox = torch.cat((all_predictions.bbox, prediction_step.bbox), dim=0)
for k in all_predictions.extra_fields:
all_predictions.extra_fields[k] = \
torch.cat((all_predictions.extra_fields[k],
prediction_step.extra_fields[k] + loop_num), dim=0)
return all_predictions
# we directly provided a list of category names
caption_string = ""
tokens_positive = []
seperation_tokens = " . "
for word in original_caption:
tokens_positive.append([len(caption_string), len(caption_string) + len(word)])
caption_string += word
caption_string += seperation_tokens
tokenized = self.tokenizer([caption_string], return_tensors="pt")
# tokens_positive = [tokens_positive] # This was wrong
tokens_positive = [[v] for v in tokens_positive]
original_caption = caption_string
# print(tokens_positive)
else:
tokenized = self.tokenizer([original_caption], return_tensors="pt")
if custom_entity is None:
tokens_positive = self.run_ner(original_caption)
# print(tokens_positive)
# process positive map
positive_map = create_positive_map(tokenized, tokens_positive)
positive_map_label_to_token = create_positive_map_label_to_token_from_positive_map(positive_map,
plus=self.plus)
self.positive_map_label_to_token = positive_map_label_to_token
tic = timeit.time.perf_counter()
# compute predictions
with HiddenPrints(): # Hide some deprecated notices
predictions = self.model(image_list, captions=[original_caption],
positive_map=positive_map_label_to_token)
predictions = [o.to(self.cpu_device) for o in predictions]
# print("inference time per image: {}".format(timeit.time.perf_counter() - tic))
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[-2:]
# if self.tensor_inputs:
# else:
# height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
@staticmethod
def to_left_right_upper_lower(bboxes):
return [(bbox[1], bbox[3], bbox[0], bbox[2]) for bbox in bboxes]
@staticmethod
def to_xmin_ymin_xmax_ymax(bboxes):
# invert the previous method
return [(bbox[2], bbox[0], bbox[3], bbox[1]) for bbox in bboxes]
@staticmethod
def prepare_image(image):
image = image[[2, 1, 0]] # convert to bgr for opencv-format for glip
return image
@torch.no_grad()
def forward(self, image: torch.Tensor, obj: Union[str, list], return_labels: bool = False,
confidence_threshold=None):
if confidence_threshold is not None:
original_confidence_threshold = self.confidence_threshold
self.confidence_threshold = confidence_threshold
# if isinstance(object, list):
# object = ' . '.join(object) + ' .' # add separation tokens
image = self.prepare_image(image)
# Avoid the resizing creating a huge image in a pathological case
ratio = image.shape[1] / image.shape[2]
ratio = max(ratio, 1 / ratio)
original_min_image_size = self.min_image_size
if ratio > 10:
self.min_image_size = int(original_min_image_size * 10 / ratio)
self.transforms = self.build_transform()
with torch.cuda.device(self.dev):
inference_output = self.inference(image, obj)
bboxes = inference_output.bbox.cpu().numpy().astype(int)
# bboxes = self.to_left_right_upper_lower(bboxes)
if ratio > 10:
self.min_image_size = original_min_image_size
self.transforms = self.build_transform()
bboxes = torch.tensor(bboxes)
# Convert to [left, lower, right, upper] instead of [left, upper, right, lower]
height = image.shape[-2]
bboxes = torch.stack([bboxes[:, 0], height - bboxes[:, 3], bboxes[:, 2], height - bboxes[:, 1]], dim=1)
if confidence_threshold is not None:
self.confidence_threshold = original_confidence_threshold
if return_labels:
# subtract 1 because it's 1-indexed for some reason
return bboxes, inference_output.get_field("labels").cpu().numpy() - 1
return bboxes
self.glip_demo = OurGLIPDemo(*args, dev=self.dev)
def forward(self, *args, **kwargs):
result = self.glip_demo.forward(*args, **kwargs)
return result
class TCLModel(BaseModel):
name = 'tcl'
def __init__(self, gpu_number=0):
from base_models.tcl.tcl_model_pretrain import ALBEF
from base_models.tcl.tcl_vit import interpolate_pos_embed
from base_models.tcl.tcl_tokenization_bert import BertTokenizer
super().__init__(gpu_number)
config = {
'image_res': 384,
'mlm_probability': 0.15,
'embed_dim': 256,
'vision_width': 768,
# 'bert_config': 'base_models/tcl_config_bert.json',
'bert_config': 'base_models/tcl/tcl_config_bert.json',
'temp': 0.07,
'queue_size': 65536,
'momentum': 0.995,
}
text_encoder = 'bert-base-uncased'
# checkpoint_path = f'{config.path_pretrained_models}/TCL_4M.pth'
checkpoint_path = './pretrained_models/TCL/TCL_4M.pth'
self.tokenizer = BertTokenizer.from_pretrained(text_encoder)
with warnings.catch_warnings(), HiddenPrints("TCL"):
model = ALBEF(config=config, text_encoder=text_encoder, tokenizer=self.tokenizer)
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict = checkpoint['model']
            # reshape positional embedding to accommodate the image resolution change
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
model.visual_encoder_m)
state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
model.load_state_dict(state_dict, strict=False)
self.model = model.to(self.dev)
self.model.eval()
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
self.test_transform = transforms.Compose([
transforms.Resize((config['image_res'], config['image_res']), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
self.negative_text_features = None
def transform(self, image):
image = transforms.ToPILImage()(image)
image = self.test_transform(image)
return image
def prepare_image(self, image):
image = self.transform(image)
image = image.unsqueeze(0)
image = image.to(self.dev)
return image
@torch.no_grad()
def binary_score(self, images: Union[List[torch.Tensor], torch.Tensor], prompt):
single_image = False
if isinstance(images, torch.Tensor):
single_image = True
images = [images]
images = [self.prepare_image(im) for im in images]
images = torch.cat(images, dim=0)
first_words = ['description', 'caption', 'alt text']
second_words = ['photo', 'image', 'picture']
options = [f'{fw}: {sw} of a' for fw in first_words for sw in second_words]
prompts = [f'{option} {prompt}' for option in options]
text_input = self.tokenizer(prompts, padding='max_length', truncation=True, max_length=30, return_tensors="pt") \
.to(self.dev)
text_output = self.model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask,
mode='text')
text_feats = text_output # .last_hidden_state
text_atts = text_input.attention_mask
image_feats = self.model.visual_encoder(images)
img_len = image_feats.shape[0]
text_len = text_feats.shape[0]
image_feats = image_feats.unsqueeze(1).repeat(1, text_len, 1, 1).view(-1, *image_feats.shape[-2:])
text_feats = text_feats.unsqueeze(0).repeat(img_len, 1, 1, 1).view(-1, *text_feats.shape[-2:])
text_atts = text_atts.unsqueeze(0).repeat(img_len, 1, 1).view(-1, *text_atts.shape[-1:])
image_feats_att = torch.ones(image_feats.size()[:-1], dtype=torch.long).to(self.dev)
output = self.model.text_encoder(encoder_embeds=text_feats, attention_mask=text_atts,
encoder_hidden_states=image_feats, encoder_attention_mask=image_feats_att,
return_dict=True, mode='fusion')
scores = self.model.itm_head(output[:, 0, :])[:, 1]
scores = scores.view(img_len, text_len)
score = scores.sigmoid().max(-1)[0]
if single_image:
score = score.item()
return score
@torch.no_grad()
def classify(self, image, texts, return_index=True):
if isinstance(image, list):
assert len(image) == len(texts)
image = [self.transform(x).unsqueeze(0) for x in image]
image_tcl = torch.cat(image, dim=0).to(self.dev)
else:
image_tcl = self.prepare_image(image)
text_input = self.tokenizer(texts, padding='max_length', truncation=True, max_length=30, return_tensors="pt") \
.to(self.dev)
text_output = self.model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask,
mode='text')
text_feats = text_output # .last_hidden_state
text_embeds = F.normalize(self.model.text_proj(text_feats[:, 0, :]))
text_atts = text_input.attention_mask
image_feats = self.model.visual_encoder(image_tcl)
image_embeds = self.model.vision_proj(image_feats[:, 0, :])
image_embeds = F.normalize(image_embeds, dim=-1)
# In the original code, this is only used to select the topk pairs, to not compute ITM head on all pairs.
# But other than that, not used
sims_matrix = image_embeds @ text_embeds.t()
sims_matrix_t = sims_matrix.t()
# Image-Text Matching (ITM): Binary classifier for every image-text pair
# Only one direction, because we do not filter bet t2i, i2t, and do all pairs
image_feats_att = torch.ones(image_feats.size()[:-1], dtype=torch.long).to(self.dev)
output = self.model.text_encoder(encoder_embeds=text_feats, attention_mask=text_atts,
encoder_hidden_states=image_feats, encoder_attention_mask=image_feats_att,
return_dict=True, mode='fusion')
score_matrix = self.model.itm_head(output[:, 0, :])[:, 1]
if not return_index:
return score_matrix
else:
return torch.argmax(score_matrix).item()
def forward(self, image, texts, task='classify', return_index=True):
if task == 'classify':
best_text = self.classify(image, texts, return_index=return_index)
out = best_text
else: # task == 'score': # binary_score
score = self.binary_score(image, texts)
out = score
if isinstance(out, torch.Tensor):
out = out.cpu()
return out
@cache.cache(ignore=['result'])
def gpt3_cache_aux(fn_name, prompts, temperature, n_votes, result):
"""
This is a trick to manually cache results from GPT-3. We want to do it manually because the queries to GPT-3 are
batched, and caching doesn't make sense for batches. With this we can separate individual samples in the batch
"""
return result
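# Usage sketch of the trick above, mirroring how GPT3Model.forward uses it below
# (variable names are illustrative):
#
#   cached = gpt3_cache_aux('gpt3_qa', prompt, temperature, n_votes, None)      # lookup; None on a miss
#   ...query the API only for the misses...
#   gpt3_cache_aux.call('gpt3_qa', prompt, temperature, n_votes, response)      # force-write the result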
class GPT3Model(BaseModel):
name = 'gpt3'
to_batch = False
requires_gpu = False
def __init__(self, gpu_number=0):
super().__init__(gpu_number=gpu_number)
with open(config.gpt3.qa_prompt) as f:
self.qa_prompt = f.read().strip()
self.temperature = config.gpt3.temperature
self.n_votes = config.gpt3.n_votes
self.model = config.gpt3.model
# initial cleaning for reference QA results
@staticmethod
def process_answer(answer):
answer = answer.lstrip() # remove leading spaces (our addition)
answer = answer.replace('.', '').replace(',', '').lower()
to_be_removed = {'a', 'an', 'the', 'to', ''}
answer_list = answer.split(' ')
answer_list = [item for item in answer_list if item not in to_be_removed]
return ' '.join(answer_list)
@staticmethod
def get_union(lists):
return list(set(chain.from_iterable(lists)))
@staticmethod
def most_frequent(answers):
answer_counts = Counter(answers)
return answer_counts.most_common(1)[0][0]
def get_qa(self, prompts, prompt_base: str=None) -> List[str]:
if prompt_base is None:
prompt_base = self.qa_prompt
prompts_total = []
for p in prompts:
question = p
prompts_total.append(prompt_base.format(question))
response = self.get_qa_fn(prompts_total)
if self.n_votes > 1:
response_ = []
for i in range(len(prompts)):
if self.model == 'chatgpt':
resp_i = [r['message']['content']
for r in response['choices'][i * self.n_votes:(i + 1) * self.n_votes]]
else:
resp_i = [r['text'] for r in response['choices'][i * self.n_votes:(i + 1) * self.n_votes]]
response_.append(self.most_frequent(resp_i))
response = response_
else:
if self.model == 'chatgpt':
response = [r['message']['content'] for r in response['choices']]
else:
response = [self.process_answer(r["text"]) for r in response['choices']]
return response
def get_qa_fn(self, prompt):
response = self.query_gpt3(prompt, model=self.model, max_tokens=5, logprobs=1, stream=False,
stop=["\n", "<|endoftext|>"])
return response
def get_general(self, prompts) -> List[str]:
if self.model == "chatgpt":
response = self.query_gpt3(prompts, model=self.model, max_tokens=256, top_p=1, frequency_penalty=0,
presence_penalty=0)
response = [r['message']['content'] for r in response['choices']]
else:
response = self.query_gpt3(prompts, model=self.model, max_tokens=256, top_p=1, frequency_penalty=0,
presence_penalty=0)
response = [r["text"] for r in response['choices']]
return response
def query_gpt3(self, prompt, model="gpt-3.5-turbo-0613", max_tokens=16, logprobs=None, stream=False,
stop=None, top_p=1, frequency_penalty=0, presence_penalty=0):
global count, api_num, CURRENT_API, CANDIDATE_API
#######################################################################
while True:
try:
response = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": p} for p in prompt],
temperature=self.temperature,
max_tokens=max_tokens,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty
)
except Exception as e:
# print(e)
if str(e).startswith("You exceeded your current quota"):
print(f"API #{CURRENT_API} run out! Use the next one!")
CURRENT_API += 1
openai.api_key = CANDIDATE_API[CURRENT_API]
continue
elif str(e).startswith("Rate limit reached"):
continue
else:
time.sleep(1)
continue
else:
break
#######################################################################
response = response['choices'][0]['message']['content']
return response
###########################################################################
def query_with_message(self, messages, model="gpt-3.5-turbo-0613", max_tokens=16, temperature=0.0, logprobs=None, stream=False,
stop=None, top_p=1, frequency_penalty=0, presence_penalty=0):
global count, api_num, CURRENT_API, CANDIDATE_API
for _ in range(20):
try:
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty
)
except Exception as e:
if str(e).startswith("You exceeded your current quota"):
print(f"API #{CURRENT_API} run out! Use the next one!")
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
continue
elif str(e).startswith("Rate limit reached"):
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
continue
else:
time.sleep(1)
print(e)
continue
else:
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
break
response = response['choices'][0]['message']['content']
return response
###########################################################################
def forward(self, prompt, process_name):
if not self.to_batch:
prompt = [prompt]
if process_name == 'gpt3_qa':
# if items in prompt are tuples, then we assume it is a question and context
if isinstance(prompt[0], tuple) or isinstance(prompt[0], list):
prompt = [question.format(context) for question, context in prompt]
to_compute = None
results = []
# Check if in cache
if config.use_cache:
for p in prompt:
# This is not ideal, because if not found, later it will have to re-hash the arguments.
# But I could not find a better way to do it.
result = gpt3_cache_aux(process_name, p, self.temperature, self.n_votes, None)
results.append(result) # If in cache, will be actual result, otherwise None
to_compute = [i for i, r in enumerate(results) if r is None]
prompt = [prompt[i] for i in to_compute]
if len(prompt) > 0:
if process_name == 'gpt3_qa':
response = self.get_qa(prompt)
else: # 'gpt3_general', general prompt, has to be given all of it
response = self.get_general(prompt)
else:
response = [] # All previously cached
if config.use_cache:
for p, r in zip(prompt, response):
# "call" forces the overwrite of the cache
gpt3_cache_aux.call(process_name, p, self.temperature, self.n_votes, r)
for i, idx in enumerate(to_compute):
results[idx] = response[i]
else:
results = response
if not self.to_batch:
results = results[0]
return results
@classmethod
def list_processes(cls):
return ['gpt3_' + n for n in ['qa', 'general']]
# @cache.cache
# @backoff.on_exception(backoff.expo, Exception, max_tries=10)
def codex_helper(extended_prompt, temperature=None):
global count, api_num, CURRENT_API, CANDIDATE_API
assert 0 <= config.codex.temperature <= 1
assert 1 <= config.codex.best_of <= 20
if not isinstance(extended_prompt, list):
extended_prompt = [extended_prompt]
########################################################################################
if config.codex.model in ["gpt-4", "gpt-4-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-0613", "codellama"]:
if not isinstance(extended_prompt, list):
extended_prompt = [extended_prompt]
for _ in range(1000):
try:
count += 1
responses = [openai.ChatCompletion.create(
model=config.codex.model,
messages=[
{"role": "system", "content": "Only answer with a function starting def execute_command."},
{"role": "user", "content": prompt}
],
temperature=temperature,
max_tokens=config.codex.max_tokens,
top_p=0.,
stop=["\n\n"],
presence_penalty=0,
frequency_penalty=0,
)
for prompt in extended_prompt]
except openai.error.InvalidRequestError as e: # too long
raise e
except Exception as e:
print(e)
if str(e).startswith("You exceeded your current quota"):
print(f"API #{CURRENT_API} run out! Use the next one!")
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
continue
elif str(e).startswith("Rate limit reached"):
print(f"API #{CURRENT_API} reach limit! Use the next one!")
print("length:", len(extended_prompt[0].split()))
count += 1
CURRENT_API = int(count % api_num)
openai.api_key = CANDIDATE_API[CURRENT_API]
continue
else:
time.sleep(1)
continue
else:
try:
resp = [r['choices'][0]['message']['content'] for r in responses]
except:
continue
break
else:
raise ValueError("No model specified")
########################################################################################
try:
if len(resp) == 1:
resp = resp[0]
except Exception as e:
print(e)
print(resp)
return ''
return resp
class CodexModel(BaseModel):
name = 'codex'
requires_gpu = False
max_batch_size = 1#5
# Not batched, but every call will probably be a batch (coming from the same process)
def __init__(self, gpu_number=0):
super().__init__(gpu_number=0)
with open(config.codex.prompt) as f:
self.base_prompt = f.read().strip()
self.fixed_code = None
if config.use_fixed_code:
with open(config.fixed_code_file) as f:
self.fixed_code = f.read()
def forward(self, prompt, input_type='image', prompt_file=None, base_prompt=None, temperature=1):
if config.use_fixed_code: # Use the same program for every sample, like in socratic models
return [self.fixed_code] * len(prompt) if isinstance(prompt, list) else self.fixed_code
if prompt_file is not None and base_prompt is None: # base_prompt takes priority
with open(prompt_file) as f:
base_prompt = f.read().strip()
elif base_prompt is None:
base_prompt = self.base_prompt
if isinstance(prompt, list):
extended_prompt = [base_prompt.replace("INSERT_QUERY_HERE", p).replace('INSERT_TYPE_HERE', input_type)
for p in prompt]
elif isinstance(prompt, str):
extended_prompt = [base_prompt.replace("INSERT_QUERY_HERE", prompt).
replace('INSERT_TYPE_HERE', input_type)]
else:
raise TypeError("prompt must be a string or a list of strings")
result = self.forward_(extended_prompt, temperature)
        if not isinstance(prompt, list) and isinstance(result, list):
            result = result[0]
return result
def forward_(self, extended_prompt, temperature):
# print(len(extended_prompt))
if len(extended_prompt) > self.max_batch_size:
response = []
            for i in range(0, len(extended_prompt), self.max_batch_size):
                sub_response = self.forward_(extended_prompt[i:i + self.max_batch_size], temperature)
                response += sub_response if isinstance(sub_response, list) else [sub_response]
            return response
try:
response = codex_helper(extended_prompt, temperature)
except openai.error.RateLimitError as e:
print("Retrying Codex, splitting batch")
if len(extended_prompt) == 1:
warnings.warn("This is taking too long, maybe OpenAI is down? (status.openai.com/)")
# Will only be here after the number of retries in the backoff decorator.
# It probably means a single batch takes up the entire rate limit.
sub_batch_1 = extended_prompt[:len(extended_prompt) // 2]
sub_batch_2 = extended_prompt[len(extended_prompt) // 2:]
if len(sub_batch_1) > 0:
response_1 = self.forward_(sub_batch_1, temperature)
else:
response_1 = []
if len(sub_batch_2) > 0:
response_2 = self.forward_(sub_batch_2, temperature)
else:
response_2 = []
response = response_1 + response_2
except Exception as e:
# Some other error like an internal OpenAI error
print("Retrying Codex")
print(e)
response = self.forward_(extended_prompt, temperature)
return response
class BLIPModel(BaseModel):
name = 'blip'
to_batch = True
max_batch_size = 32
seconds_collect_data = 0.2 # The queue has additionally the time it is executing the previous forward pass
def __init__(self, gpu_number=0, half_precision=config.blip_half_precision,
blip_v2_model_type=config.blip_v2_model_type):
super().__init__(gpu_number)
# from lavis.models import load_model_and_preprocess
from transformers import BlipProcessor, BlipForConditionalGeneration, Blip2Processor, \
Blip2ForConditionalGeneration
# https://huggingface.co/models?sort=downloads&search=Salesforce%2Fblip2-
assert blip_v2_model_type in ['blip2-flan-t5-xxl', 'blip2-flan-t5-xl', 'blip2-opt-2.7b', 'blip2-opt-6.7b',
'blip2-opt-2.7b-coco', 'blip2-flan-t5-xl-coco', 'blip2-opt-6.7b-coco']
with warnings.catch_warnings(), HiddenPrints("BLIP"), torch.cuda.device(self.dev):
max_memory = {gpu_number: torch.cuda.mem_get_info(self.dev)[0]}
self.processor = Blip2Processor.from_pretrained(f"Salesforce/{blip_v2_model_type}")
# Device_map must be sequential for manual GPU selection
try:
self.model = Blip2ForConditionalGeneration.from_pretrained(
f"Salesforce/{blip_v2_model_type}", load_in_8bit=half_precision,
torch_dtype=torch.float16 if half_precision else "auto",
# device_map="sequential", max_memory=max_memory
device_map="auto"
)
# print(self.model.device_map)
except Exception as e:
# Clarify error message. The problem is that it tries to load part of the model to disk.
if "had weights offloaded to the disk" in e.args[0]:
extra_text = ' You may want to consider setting half_precision to True.' if half_precision else ''
raise MemoryError(f"Not enough GPU memory in GPU {self.dev} to load the model.{extra_text}")
else:
raise e
self.qa_prompt = "Question: {} Short answer:"
self.caption_prompt = "a photo of"
self.half_precision = half_precision
self.max_words = 50
@torch.no_grad()
def caption(self, image, prompt=None):
inputs = self.processor(images=image, text=prompt, return_tensors="pt").to(self.dev, torch.float16)
generated_ids = self.model.generate(**inputs, length_penalty=1., num_beams=5, max_length=30, min_length=1,
do_sample=False, top_p=0.9, repetition_penalty=1.0,
num_return_sequences=1, temperature=1)
generated_text = [cap.strip() for cap in
self.processor.batch_decode(generated_ids, skip_special_tokens=True)]
return generated_text
def pre_question(self, question):
# from LAVIS blip_processors
question = re.sub(
r"([.!\"()*#:;~])",
"",
question.lower(),
)
question = question.rstrip(" ")
# truncate question
question_words = question.split(" ")
if len(question_words) > self.max_words:
question = " ".join(question_words[: self.max_words])
return question
@torch.no_grad()
def qa(self, image, question):
inputs = self.processor(images=image, text=question, return_tensors="pt", padding="longest").to(self.dev)
if self.half_precision:
inputs['pixel_values'] = inputs['pixel_values'].half()
generated_ids = self.model.generate(**inputs, length_penalty=-1, num_beams=5, max_length=10, min_length=1,
do_sample=False, top_p=0.9, repetition_penalty=1.0,
num_return_sequences=1, temperature=1)
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
return generated_text
def forward(self, image, question=None, task='caption'):
if not self.to_batch:
image, question, task = [image], [question], [task]
# Separate into qa and caption batches.
prompts_qa = [self.qa_prompt.format(self.pre_question(q)) for q, t in zip(question, task) if t == 'qa']
images_qa = [im for i, im in enumerate(image) if task[i] == 'qa']
images_caption = [im for i, im in enumerate(image) if task[i] == 'caption']
with torch.cuda.device(self.dev):
response_qa = self.qa(images_qa, prompts_qa) if len(images_qa) > 0 else []
response_caption = self.caption(images_caption) if len(images_caption) > 0 else []
response = []
for t in task:
if t == 'qa':
response.append(response_qa.pop(0))
else:
response.append(response_caption.pop(0))
if not self.to_batch:
response = response[0]
return response
class SaliencyModel(BaseModel):
name = 'saliency'
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/saliency_inspyrenet_plus_ultra'):
from base_models.inspyrenet.saliency_transforms import get_transform
from base_models.inspyrenet.InSPyReNet import InSPyReNet
from base_models.inspyrenet.backbones.SwinTransformer import SwinB
# These parameters are for the Plus Ultra LR model
super().__init__(gpu_number)
depth = 64
pretrained = True
base_size = [384, 384]
kwargs = {'name': 'InSPyReNet_SwinB', 'threshold': 512}
with HiddenPrints("Saliency"):
model = InSPyReNet(SwinB(pretrained=pretrained, path_pretrained_models=config.path_pretrained_models),
[128, 128, 256, 512, 1024], depth, base_size, **kwargs)
model.load_state_dict(torch.load(os.path.join(path_checkpoint, 'latest.pth'),
map_location=torch.device('cpu')), strict=True)
model = model.to(self.dev)
model.eval()
self.model = model
self.transform_pil = transforms.ToPILImage()
self.transform = get_transform({
'static_resize': {'size': [384, 384]},
'dynamic_resize': {'L': 1280},
'tonumpy': None,
'normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]},
'totensor': None
})
@torch.no_grad()
def forward(self, image):
image_t = self.transform({'image': self.transform_pil(image)})
image_t['image_resized'] = image_t['image_resized'].unsqueeze(0).to(self.dev)
image_t['image'] = image_t['image'].unsqueeze(0).to(self.dev)
pred = self.model(image_t)['pred']
pred_resized = F.interpolate(pred, image.shape[1:], mode='bilinear', align_corners=True)[0, 0]
mask_foreground = pred_resized < 0.5
image_masked = image.clone()
image_masked[:, mask_foreground] = 0
return image_masked
class XVLMModel(BaseModel):
name = 'xvlm'
def __init__(self, gpu_number=0,
path_checkpoint=f'{config.path_pretrained_models}/xvlm/retrieval_mscoco_checkpoint_9.pth'):
from base_models.xvlm.xvlm import XVLMBase
from transformers import BertTokenizer
super().__init__(gpu_number)
image_res = 384
self.max_words = 30
config_xvlm = {
'image_res': image_res,
'patch_size': 32,
'text_encoder': 'bert-base-uncased',
'block_num': 9,
'max_tokens': 40,
'embed_dim': 256,
}
vision_config = {
'vision_width': 1024,
'image_res': 384,
'window_size': 12,
'embed_dim': 128,
'depths': [2, 2, 18, 2],
'num_heads': [4, 8, 16, 32]
}
with warnings.catch_warnings(), HiddenPrints("XVLM"):
model = XVLMBase(config_xvlm, use_contrastive_loss=True, vision_config=vision_config)
checkpoint = torch.load(path_checkpoint, map_location='cpu')
state_dict = checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint
msg = model.load_state_dict(state_dict, strict=False)
if len(msg.missing_keys) > 0:
print('XVLM Missing keys: ', msg.missing_keys)
model = model.to(self.dev)
model.eval()
self.model = model
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((image_res, image_res), interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
with open('useful_lists/random_negatives.txt') as f:
self.negative_categories = [x.strip() for x in f.read().split()]
@staticmethod
def pre_caption(caption, max_words):
caption = re.sub(
r"([,.'!?\"()*#:;~])",
'',
caption.lower(),
).replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
caption = re.sub(
r"\s{2,}",
' ',
caption,
)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
# truncate caption
caption_words = caption.split(' ')
if len(caption_words) > max_words:
caption = ' '.join(caption_words[:max_words])
if not len(caption):
raise ValueError("pre_caption yields invalid text")
return caption
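    # Example of the normalisation performed above (illustrative only, not in the original file):
    #   >>> XVLMModel.pre_caption("A close-up photo of the Dog!", 30)
    #   'a close up photo of the dog'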
@torch.no_grad()
def score(self, images, texts):
if isinstance(texts, str):
texts = [texts]
if not isinstance(images, list):
images = [images]
images = [self.transform(image) for image in images]
images = torch.stack(images, dim=0).to(self.dev)
texts = [self.pre_caption(text, self.max_words) for text in texts]
text_input = self.tokenizer(texts, padding='longest', return_tensors="pt").to(self.dev)
image_embeds, image_atts = self.model.get_vision_embeds(images)
text_ids, text_atts = text_input.input_ids, text_input.attention_mask
text_embeds = self.model.get_text_embeds(text_ids, text_atts)
image_feat, text_feat = self.model.get_features(image_embeds, text_embeds)
logits = image_feat @ text_feat.t()
return logits
@torch.no_grad()
def binary_score(self, image, text, negative_categories):
# Compare with a pre-defined set of negatives
texts = [text] + negative_categories
sim = 100 * self.score(image, texts)[0]
res = F.softmax(torch.cat((sim[0].broadcast_to(1, sim.shape[0] - 1),
sim[1:].unsqueeze(0)), dim=0), dim=0)[0].mean()
return res
def forward(self, image, text, task='score', negative_categories=None):
if task == 'score':
score = self.score(image, text)
else: # binary
score = self.binary_score(image, text, negative_categories=negative_categories)
return score.cpu()
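# Illustrative usage (a sketch, not part of the original file; assumes `image` is a CHW tensor):
#   xvlm = XVLMModel(gpu_number=0)
#   logits = xvlm.forward(image, ["a red apple", "a green pear"], task='score')
#   prob = xvlm.forward(image, "a red apple", task='binary',
#                       negative_categories=xvlm.negative_categories)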
############################################################################################
class BingModel(BaseModel):
name = "bing"
requires_gpu = False
def __init__(self, gpu_number):
self.subscription_key = open("bing.key").read()
self.endpoint='https://api.bing.microsoft.com/v7.0/search'
self.mkt = 'en-GB'
def parse_bing_result(self, result):
responses = []
try:
value = result["webPages"]["value"]
except:
return responses
for i in range(len(value)):
snippet = value[i]['snippet'] if 'snippet' in value[i] else ""
snippet = snippet.replace("<b>", "").replace("</b>", "").strip()
if snippet != "":
responses.append(snippet)
return responses
def forward(self, query):
params = { 'q': query, 'mkt': self.mkt , "answerCount": 1}
headers = { 'Ocp-Apim-Subscription-Key': self.subscription_key }
try:
response = requests.get(self.endpoint, headers=headers, params=params)
response.raise_for_status()
response_data = response.json()
except Exception as ex:
raise ex
responses = self.parse_bing_result(response_data)
search_result = ''
if len(responses) > 0 and responses[0] != "":
inserted_response = " ".join(responses[0].split(" ")[:100])
search_result += f"{inserted_response} ..."
return search_result
############################################################################################ | [
"INSERT_TYPE_HERE",
"Only answer with a function starting def execute_command.",
"PLACEHOLDER PLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"INSERT_QUERY_HERE",
"[]",
"photo of ",
"[{'role': 'user', 'content': 'PLACEHOLDER'}]"
] |
2024-01-10 | lifan-yuan/CRAFT | vqa~retrieve_tools.py | import pandas as pd
import numpy as np
import datasets
import openai
from utils import *
def planning(query):
messages = [
{"role": "user", "content": retrieval_template.format(query=query)},
]
response = gpt3_model.query_with_message(messages, max_tokens=200)
plans = [query, response.split("The final answer is: ")[1].strip()]
try:
expected_tools = eval(response.split("\n\n")[-2].split("The useful functions are: ")[1].strip("."))
except:
expected_tools = eval(response.split("\n\n")[-2].split("The useful function is: ")[1].strip("."))
return plans, expected_tools
def match_plan_from_single_perspective(plan_embeddings, tool_embeddings, k=3): # k: number of tools to retrieve for each sub-task from each perspective
tool_list = []
for plan_embedding in plan_embeddings:
# compute cos sim between plan and query
plan_embedding = plan_embedding.unsqueeze(0)
sim = torch.nn.functional.cosine_similarity(plan_embedding.unsqueeze(1), tool_embeddings.unsqueeze(0), dim=2)
topk = torch.topk(sim, k=k, dim=1).indices.squeeze(0).tolist()
tool_list.append(topk)
return tool_list
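# The helper above returns one list of top-k tool indices per sub-task embedding, e.g. for
# two sub-tasks and k=3 something like [[4, 17, 2], [8, 4, 31]] (illustrative indices only).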
def retrieve_tool(example, vector_library, model, tokenizer, k=3): # k: number of tools to retrieve for each sub-task
##### Retrieval Stage of CRAFT #####
# decompose the query into sub-tasks
plans, expected_tools = planning(example['query'])
plan_embeddings = compute_simcse(model, tokenizer, plans)
expected_tool_embeddings = compute_simcse(model, tokenizer, expected_tools)
# match plan with tools from different perspectives
tool_by_explanation = match_plan_from_single_perspective(plan_embeddings[1:], vector_library["explanation_embedding"], k=10)
tool_by_name = match_plan_from_single_perspective(expected_tool_embeddings, vector_library["name_embedding"], k=5)
tool_by_query = match_plan_from_single_perspective(plan_embeddings[0].unsqueeze(0), vector_library["query_embedding"], k=10)
counter = Counter([
*[item for sublist in tool_by_explanation for item in sublist], # k_1*len(plans)
*[item for sublist in tool_by_name for item in sublist], # k_1*len(plans)
*[item for sublist in tool_by_query for item in sublist], # k_1*1
])
    top_k = counter.most_common(k)
    tool_list = []  # tools that received enough votes across the three perspectives
    tool_list.extend([tool for (tool, count) in top_k if count >= 2]) # must at least have 2 votes
tool_list = list(set(tool_list))
return {"expected_tools": expected_tools, "retrieved_tools": tool_list}
| [] |
2024-01-10 | lifan-yuan/CRAFT | vqa~run_exp.py | from utils import *
import datasets
import random
import numpy as np
import openai
import os
import openai
from metric_utils import *
from utils import process_code
def generate_code(example):
code = forward('codex', prompt=example["question"], input_type="image")
code = process_code(code)
return {
"code": code
}
from retrieve_tools import retrieve_tool
PROMPT = open(config.codex.prompt).read()
inserted_tools_prompt = """**Note: If necessary, you may also leverage the following tools to directly perform complex operations.
However, please carefully review the implementation code of the tool functions to determine whether to utilize any of them.
Additionally, consider the appropriate method of passing parameters based on your comprehension of the internal implementation of the tool functions, rather than solely relying on the docstring.**\n"""
def wrap_into_function(func_head, docstring):
name = func_head.split("(")[0].strip()
args = ", ".join([arg.split(":")[0].strip() for arg in func_head.split("(")[1].split(")")[0].split(",")])
return f"def {func_head}:" + "\n" + f"\t'''{docstring}\n\t'''" + "\n" + f"\treturn {name}({args})\n"
def wrap_into_incontext_sample(query, call):
code = f"Query: {query}" + "\n" + "def execute_command(image):" + "\n" + "\timage_patch = ImagePatch(image)" + "\n" + f"\treturn {call}\n"
return code
def count_args_from_call(call):
record = [] # record all (), [], {}
tuples = re.findall(r"\((.*?)\)", call)
if len(tuples) > 1: # first one is the total args
for i in range(1, len(tuples)):
record.append(tuples[i])
lists = re.findall(r"\[(.*?)\]", call)
for i in range(0, len(lists)):
record.append(lists[i])
dicts = re.findall(r"\{(.*?)\}", call)
for i in range(0, len(dicts)):
record.append(dicts[i])
# now replace all comma in record with ";" for protection
for i, sub_string in enumerate(record):
call = call.replace(sub_string, sub_string.replace(",", ";"))
# now count the number of args by splitting with ","
try:
args = re.findall(r"\((.*?)\)", call)[0].split(", ")
except:
print(call, re.findall(r"\((.*?)\)", call))
exit()
return len(args)
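# Illustrative example (not part of the original file): bracketed arguments are protected
# from the comma split, so the call below counts as two arguments.
#   >>> count_args_from_call("crop_image(image, [10, 20, 30, 40])")
#   2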
def remove_extra_functions(code, all_tools, retrieved_tools):
# extract function name and args from retrieved tools
try:
function_names = [extract_function_name(item) for item in [all_tools[i] for i in retrieved_tools]]
except IndexError:
print(len(all_tools), retrieved_tools)
exit()
num_args = [count_args(item) for item in [all_tools[i] for i in retrieved_tools]]
# extract function name and args from code
tool_call = set() # may use multiple tools or each using multiple times
for line in code.split("\n"):
for func_name in function_names:
if func_name in line:
tool_call.add((func_name, line.strip()))
tool_call = list(tool_call)
num_args_in_code = []
for func_name, call in tool_call:
arg_list = []
if "(" in call and ")" in call: # make sure there are args
num_args_in_code.append((func_name, count_args_from_call(call)))
filtered_tools = []
for i, (func_name, num_arg) in enumerate(zip(function_names, num_args)):
if (func_name, num_arg) in num_args_in_code:
filtered_tools.append(retrieved_tools[i]) # list[int]
return filtered_tools
def generate_code_with_retrieval(example, vector_library, model, tokenizer):
print()
print(example["question"])
retrieval_results = retrieve_tool(example, vector_library, model, tokenizer, 3)
retrieved_tools = retrieval_results["retrieved_tools"]
top_k = 3
while True:
try:
tools = retrieved_tools[:top_k]
if len(tools) > 0:
inserted_tools = inserted_tools_prompt + "\n" + "\n\n".join([toolbase["tool"][tool] for tool in tools])
base_prompt = PROMPT.replace("INSERT_TOOL_HERE", inserted_tools)
else:
base_prompt = PROMPT.replace("INSERT_TOOL_HERE", "")
inserted_tools = ""
code = forward('codex', prompt=example["question"], input_type="image", base_prompt=base_prompt)
code = process_code(code)
except openai.error.InvalidRequestError as e: # exceed max token length
print(e)
top_k -= 1
continue
else:
print()
print("\n\n".join([toolbase["tool"][tool] for tool in tools]))
break
# # write base_prompt to temp.txt
# with open("temp.txt", "w") as f:
# f.write(base_prompt)
print()
print(example["question"])
print(code)
print("\n"*3)
return {
"code": code,
"inserted_tool_prompts": inserted_tools,
"retrieved_tools": tools
}
def validate_results(dataset):
code_results_path = f"./results/eval/{args.eval_dataset}/{args.model}.json" if not args.retrieval else \
f"./results/eval/{args.eval_dataset}/{args.model}_retrieval.json"
if os.path.exists(code_results_path):
import json
with open(code_results_path, "r") as f:
results = json.load(f)
keys = list(range(len(results["prediction"].keys())))
predictions = [results["prediction"][str(i)] for i in keys]
groundtruths = [results["groundtruth"][str(i)] for i in keys]
else:
global flag
if not flag:
init_vision_models()
flag = True
predictions = []
groundtruths = []
for data in tqdm(dataset):
            image = load_image(data["image_path"])
            code = data["code"]  # code produced in the generation step above
            # wrap tools into code
if args.retrieval:
# should deduplicate the retrieved tools
retrieved_tools = remove_extra_functions(code, toolbase["tool"], data["retrieved_tools"])
retrieved_tools = [toolbase["tool"][i] for i in retrieved_tools]
explanations = [extract_function_docstring(item)[0] for item in retrieved_tools]
retrieved_tools = [tool.replace(explanation, "") for tool, explanation in zip(retrieved_tools, explanations)]
# print(retrieved_tools)
code = "\n\n".join([
*retrieved_tools,
code
])
# execute code
if code is None:
prediction = ""
elif ("pixel" in code) or ("input(" in code) or ("return" not in code): # infinite loop or no return
print("Error in turbo-generated code.")
prediction = ""
else:
try: # normal cases
print(code)
prediction = execute_code(code, image)
print()
print(data["question"])
print(prediction, data["answers"])
except:
print("Error in turbo-generated code. ")
prediction = ""
# process bool to yes/no
if str(prediction) == "True":
prediction = "yes"
elif str(prediction) == "False":
prediction = "no"
predictions.append(prediction)
groundtruths.append(data["answers"])
# save to csv, using pandas
print("save")
import pandas as pd
df = pd.DataFrame({"prediction": predictions, "groundtruth": groundtruths})
df.to_json(code_results_path, indent=4)
print("saved")
return predictions, groundtruths
if __name__ == "__main__":
# add args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--eval_dataset", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--retrieval", action="store_true")
parser.add_argument("--tool_epoch", type=int, default=-1)
parser.add_argument("--ablation", type=str, default="none")
args = parser.parse_args()
print(os.getenv('CONFIG_NAMES', None))
dataset = datasets.load_from_disk(f"./datasets/eval/{args.eval_dataset}")#.select(range(1))
assert len(dataset) == 1000
flag = False
####################################### Code Gen #######################################
reserved_columns = ["image_id", "image_path", "question", "answers", "code"]
if args.retrieval:
reserved_columns = reserved_columns + ["retrieved_tools"]
all_columns = list(dataset.features.keys())
##### Tool Scaling #####
if args.retrieval:
code_results_path = f"./results/eval/{args.eval_dataset}_{args.model}_retrieval.json"
else:
code_results_path = f"./results/eval/{args.eval_dataset}_{args.model}.json"
##### Tool-Augmented Code Generation #####
if args.retrieval:
### Prepare model and toolbase for tool retrieval ###
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("princeton-nlp/sup-simcse-roberta-large")
model = AutoModel.from_pretrained("princeton-nlp/sup-simcse-roberta-large").cuda()
##### initialize toolbase and vector library #####
toolbase = datasets.Dataset.from_csv('./results/viper/5_deduplicated_tool.csv')
vector_library = torch.load("./results/viper/vector_library.pt")
print("toolbase length:", len(toolbase))
### Code Generation with Tool Retrieval ###
if os.path.exists(code_results_path):
dataset = datasets.Dataset.from_json(code_results_path)
else:
flag = True
init_vision_models()
dataset = dataset.map(lambda x: generate_code_with_retrieval(x, vector_library, model, tokenizer), load_from_cache_file=False).remove_columns(set(all_columns)-set(reserved_columns))
dataset.to_json(code_results_path)
    else: ##### Vanilla Code Generation #####
if os.path.exists(code_results_path):
dataset = datasets.Dataset.from_json(code_results_path)
else:
flag = True
init_vision_models()
dataset = dataset.map(generate_code, load_from_cache_file=False).remove_columns(set(all_columns)-set(reserved_columns))
dataset.to_json(code_results_path)
####################################### Validate #######################################
dataset = dataset.map(lambda x: {"image_path": os.path.join(f"./datasets/eval/{args.eval_dataset}/images", x['image_path'])})
##### Tool Scaling #####
if args.retrieval:
exec_results_path = f"./results/eval/{args.eval_dataset}_{args.model}_retrieval.json"
else:
exec_results_path = f"./results/eval/{args.eval_dataset}_{args.model}.json"
##### Call Vision Models #####
predictions, groundtruths = validate_results(dataset)
##### Compute Metrics #####
import pandas as pd
vqa_acc = 100.00 * compute_vqa_acc(predictions, groundtruths)
f1 = 100.00 * compute_f1(predictions, groundtruths)
print(f"Soft accuracy: {vqa_acc}")
print(f"F1 score: {f1}")
##### Write Metrics #####
os.makedirs(f"./results/metrics/{args.eval_dataset}", exist_ok=True)
df = pd.DataFrame({"soft_acc": [vqa_acc], "f1": [f1]})
metric_results_path = f"./results/metrics/{args.eval_dataset}/{args.model}.csv"
df.to_csv(metric_results_path)
| [
"**Note: If necessary, you may also leverage the following tools to directly perform complex operations. \nHowever, please carefully review the implementation code of the tool functions to determine whether to utilize any of them.\nAdditionally, consider the appropriate method of passing parameters based on your comprehension of the internal implementation of the tool functions, rather than solely relying on the docstring.**\n",
"INSERT_TOOL_HERE"
] |
2024-01-10 | Eloise1988/OPENAI | asyncV2~robotV2.py | import requests
import json
import os
import re
import random
import memory
import asyncio
# OpenAI secret Key
API_KEY = 'xxxxxxxxxxxsecretAPIxxxxxxxxxx'
# Models: text-davinci-003,text-curie-001,text-babbage-001,text-ada-001
MODEL = 'gpt-3.5-turbo'
# Telegram secret access bot token
BOT_TOKEN = 'xxxxxxbotapikeyxxxxx'
# Specify all group ID the bot can respond too
ALLOWED_GROUP_ID = ['-100xxxxxxxx', '-1001xxxxxxxx1']
# Specify your Chat Bot handle
CHATBOT_HANDLE = '@ask_chatgptbot'
# Retrieve last message ID: create an empty text file named chatgpt.txt, write 1 on its first line, save it, and set its full path below
FILENAME = '/xxxxxx/xxxxxxx/xxxxx/chatgpt.txt'
# 2a. Function that gets the response from OpenAI's chatbot
async def openAI(prompt, max_tokens):
# Make the request to the OpenAI API
response = requests.post(
'https://api.openai.com/v1/chat/completions',
headers={'Authorization': f'Bearer {API_KEY}'},
json={'model': MODEL, 'messages': [{"role": "user", "content": prompt}], 'temperature': 0.5, 'max_tokens': max_tokens},
timeout=10
)
result=response.json()
final_result=''
for i in range(0,len(result['choices'])):
final_result+=result['choices'][i]['message']['content']
return final_result
# 2b. Function that gets an Image from OpenAI
async def openAImage(prompt):
# Make the request to the OpenAI API
resp = requests.post(
'https://api.openai.com/v1/images/generations',
headers={'Authorization': f'Bearer {API_KEY}'},
json={'prompt': prompt,'n' : 1, 'size': '256x256'},
timeout=10
)
response_text = json.loads(resp.text)
#print(response_text['data'][0]['url'])
return response_text['data'][0]['url']
# Sending a message to a specific telegram group
async def telegram_bot_sendtext(bot_message,chat_id,msg_id):
data = {
'chat_id': chat_id,
'text': bot_message,
'reply_to_message_id': msg_id
}
response = requests.post(
'https://api.telegram.org/bot' + BOT_TOKEN + '/sendMessage',
json=data,
timeout=5
)
return response.json()
# Sending a image to a specific telegram group
async def telegram_bot_sendimage(image_url,group_id, msg_id):
data = {'chat_id': group_id, 'photo': image_url,'reply_to_message_id': msg_id}
url = 'https://api.telegram.org/bot' + BOT_TOKEN + '/sendPhoto'
response = requests.post(url, data=data, timeout=5)
return response.json()
# Checking for specific tone for message
async def checkTone(user_message):
bot_personality=''
match = re.search(r"/setTone\((.*?)\)", user_message, flags=re.IGNORECASE)
if match:
substring = match.group(1)
bot_personality = 'Answer in a '+ substring +' tone, '
user_message=user_message.replace('/setTone('+substring+')','')
return [user_message,bot_personality]
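# Illustrative example (not part of the original file):
#   await checkTone("/setTone(Sarcastic) What is Python?")
#   -> [" What is Python?", "Answer in a Sarcastic tone, "]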
async def ChatGPTbot():
# Give your bot a personality using adjectives from the tone list
bot_personality = ''
# Leave write_history BLANK
write_history= ''
tone_list=['Friendly','Professional','Humorous','Sarcastic','Witty','Sassy','Charming','Cheeky','Quirky','Laid-back','Elegant','Playful','Soothing','Intense','Passionate']
with open(FILENAME) as f:
last_update = f.read()
f.close()
# Check for new messages in Telegram group
url = f'https://api.telegram.org/bot{BOT_TOKEN}/getUpdates?offset={last_update}'
response = requests.get(url, timeout=5)
data = json.loads(response.content)
print(data)
    result = data['result'][-1]
try:
# Checking for new message
if float(result['update_id']) > float(last_update):
# Checking for new messages that did not come from chatGPT
if not result['message']['from']['is_bot']:
last_update = str(int(result['update_id']))
# Retrieving the chat ID of the sender of the request
chat_id = str(result['message']['chat']['id'])
if chat_id in ALLOWED_GROUP_ID:
msg_id = str(int(result['message']['message_id']))
try:
# Greeting message for new participants
if 'new_chat_participant' in result['message']:
prompt = 'Write in a '+random.choice(tone_list)+' tone: ' + "I am here to assist you. Nice to meet you, "+result['message']['new_chat_participant']['first_name']
bot_response = await openAI(prompt, 200)
# Sending back response to telegram group
x = await telegram_bot_sendtext(bot_response, chat_id, msg_id)
except Exception as e:
print(e)
try:
if '/img' in result['message']['text']:
prompt = result['message']['text'].replace("/img", "")
bot_response = await openAImage(prompt)
x = await telegram_bot_sendimage(bot_response, chat_id, msg_id)
except Exception as e:
print(e)
boolean_active=False
                    # Checking that user mentioned chatbot's username in message
if CHATBOT_HANDLE in result['message']['text']:
prompt = result['message']['text'].replace(CHATBOT_HANDLE, "")
boolean_active=True
# Verifying that the user is responding to the ChatGPT bot
if 'reply_to_message' in result['message']:
if result['message']['reply_to_message']['from']['username'] == CHATBOT_HANDLE[1:]:
prompt = result['message']['text']
#Getting historical messages from user
write_history = await memory.get_channel_messages(chat_id,msg_id)
boolean_active=True
if boolean_active:
try:
prompt1=await checkTone(prompt)
prompt=prompt1[0]
bot_personality=prompt1[1]
boolean_active=True
except Exception as e:
print(e)
try:
if write_history!='':
prompt=write_history+"\n\nQ : "+prompt+"\n\n###\n\n"
bot_response = await openAI(f"{bot_personality}{prompt}",300)
                            if bot_response == '':
                                # fall back to re-asking with the original prompt if the first reply is empty
                                bot_response = await openAI(f"{bot_personality}{prompt}", 300)
x = await telegram_bot_sendtext(bot_response, chat_id, msg_id)
except Exception as e:
print(e)
except Exception as e:
print(e)
# Updating file with last update ID
with open(FILENAME, 'w') as f:
f.write(last_update)
return "done"
async def main():
while True:
await ChatGPTbot()
await asyncio.sleep(5)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"I am here to assist you. Nice to meet you, ",
"new_chat_participant",
"PLACEHOLDER\n\nQ : PLACEHOLDER\n\n###\n\n",
"first_name",
" tone: ",
"Write in a "
] |
2024-01-10 | Eloise1988/OPENAI | robot.py | # 1. Start by importing the necessary libraries and setting up the API clients
import requests
import json
import os
import threading
# OpenAI secret Key
API_KEY = 'xxxxxxxxxxxsecretAPIxxxxxxxxxx'
# Models: text-davinci-003,text-curie-001,text-babbage-001,text-ada-001
MODEL = 'gpt-3.5-turbo'
# Telegram secret access bot token
BOT_TOKEN = 'xxxxxxbotapikeyxxxxx'
# Defining the bot's personality using adjectives
BOT_PERSONALITY = 'Answer in a funny tone, '
# Specify your Chat Bot handle
CHATBOT_HANDLE = '@ask_chatgptbot'
# 2a. Function that gets the response from OpenAI's chatbot
def openAI(prompt):
# Make the request to the OpenAI API
response = requests.post(
'https://api.openai.com/v1/chat/completions',
headers={'Authorization': f'Bearer {API_KEY}'},
json={'model': MODEL, 'messages': [{"role": "user", "content": prompt}], 'temperature': 0.5, 'max_tokens': 300},
timeout=10
)
result=response.json()
final_result=''
for i in range(0,len(result['choices'])):
final_result+=result['choices'][i]['message']['content']
return final_result
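# Illustrative usage (a sketch, not part of the original file):
#   reply = openAI(f"{BOT_PERSONALITY}What is the capital of France?")
#   print(reply)  # single string assembled from all returned choices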
# 2b. Function that gets an Image from OpenAI
def openAImage(prompt):
# Make the request to the OpenAI API
resp = requests.post(
'https://api.openai.com/v1/images/generations',
headers={'Authorization': f'Bearer {API_KEY}'},
json={'prompt': prompt,'n' : 1, 'size': '1024x1024'},
timeout=10
)
response_text = json.loads(resp.text)
return response_text['data'][0]['url']
# 3a. Function that sends a message to a specific telegram group
def telegram_bot_sendtext(bot_message,chat_id,msg_id):
data = {
'chat_id': chat_id,
'text': bot_message,
'reply_to_message_id': msg_id
}
response = requests.post(
'https://api.telegram.org/bot' + BOT_TOKEN + '/sendMessage',
json=data,
timeout=5
)
return response.json()
# 3b. Function that sends an image to a specific telegram group
def telegram_bot_sendimage(image_url, group_id, msg_id):
data = {
'chat_id': group_id,
'photo': image_url,
'reply_to_message_id': msg_id
}
url = 'https://api.telegram.org/bot' + BOT_TOKEN + '/sendPhoto'
response = requests.post(url, data=data, timeout=5)
return response.json()
# 4. Function that retrieves the latest requests from users in a Telegram group,
# generates a response using OpenAI, and sends the response back to the group.
def Chatbot():
# Retrieve last ID message from text file for ChatGPT update
cwd = os.getcwd()
filename = cwd + '/chatgpt.txt'
if not os.path.exists(filename):
with open(filename, "w") as f:
f.write("1")
else:
does_file_exist="File Exists"
with open(filename) as f:
last_update = f.read()
f.close()
# Check for new messages in Telegram group
url = f'https://api.telegram.org/bot{BOT_TOKEN}/getUpdates?offset={last_update}'
response = requests.get(url, timeout=5)
data = json.loads(response.content)
print(data)
for result in data['result']:
try:
# Checking for new message
if float(result['update_id']) > float(last_update):
# Checking for new messages that did not come from chatGPT
if not result['message']['from']['is_bot']:
last_update = str(int(result['update_id']))
# Retrieving message ID of the sender of the request
msg_id = str(int(result['message']['message_id']))
# Retrieving the chat ID
chat_id = str(result['message']['chat']['id'])
# Checking if user wants an image
if '/img' in result['message']['text']:
prompt = result['message']['text'].replace("/img", "")
bot_response = openAImage(prompt)
print(telegram_bot_sendimage(bot_response, chat_id, msg_id))
                    # Checking that user mentioned chatbot's username in message
if CHATBOT_HANDLE in result['message']['text'] or "/ask" in result['message']['text']:
prompt = result['message']['text'].replace(CHATBOT_HANDLE, "")
# Calling OpenAI API using the bot's personality
bot_response = openAI(f"{BOT_PERSONALITY}{prompt}")
# Sending back response to telegram group
print(telegram_bot_sendtext(bot_response, chat_id, msg_id))
# Verifying that the user is responding to the ChatGPT bot
if 'reply_to_message' in result['message']:
if result['message']['reply_to_message']['from']['username'] == CHATBOT_HANDLE[1:]:
prompt = result['message']['text']
bot_response = openAI(f"{BOT_PERSONALITY}{prompt}")
print(telegram_bot_sendtext(bot_response, chat_id, msg_id))
except Exception as e:
print(e)
# Updating file with last update ID
with open(filename, 'w') as f:
f.write(last_update)
f.close()
return "done"
# 5 Running a check every 5 seconds to check for new messages
def main():
timertime=5
Chatbot()
# 5 sec timer
threading.Timer(timertime, main).start()
# Run the main function
if __name__ == "__main__":
main()
| [] |
2024-01-10 | daveshap/ChatGPT_QA_Regenerative_Medicine | step02_embed_papers.py | import os
import openai
import json
from time import time,sleep
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def load_json(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return json.load(infile)
def save_json(filepath, payload):
with open(filepath, 'w', encoding='utf-8') as outfile:
json.dump(payload, outfile, ensure_ascii=False, sort_keys=True, indent=2)
def gpt3_embedding(content, engine='text-embedding-ada-002'):
#try:
print('CONTENT TO EMBED:', content)
content = content.encode(encoding='ASCII',errors='ignore').decode() # fix any UNICODE errors
response = openai.Embedding.create(input=content,engine=engine)
vector = response['data'][0]['embedding'] # this is a normal list
print('VECTOR:', vector)
return vector
#except:
# return None
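# Illustrative usage (not part of the original file): text-embedding-ada-002 returns a plain
# Python list of 1536 floats.
#   vec = gpt3_embedding("Regenerative medicine repairs damaged tissue.")
#   len(vec)  # -> 1536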
def process_text_files(dir_path, out_path):
# Create the output directory if it doesn't exist
if not os.path.exists(out_path):
os.mkdir(out_path)
# Loop through all files in the directory
for filename in os.listdir(dir_path):
if filename.endswith(".txt"):
# Read in the text file
with open(dir_path + filename, "r", encoding="utf-8") as f:
text = f.read()
# Split the text into pages
pages = text.split("NEW PAGE")
# Generate embeddings for each page
embeddings = [gpt3_embedding(page) for page in pages]
# Create a dictionary with the filename and the pages and embeddings
output_dict = {"original_filename": filename,
"pages": [{"page_number": i+1,
"text": page,
"embedding": embedding} for i, (page, embedding) in enumerate(zip(pages, embeddings))]}
# Save the dictionary to a JSON file
#with open(os.path.join(out_path, filename.replace(".txt", ".json")), "w") as f:
# json.dump(output_dict, f)
save_json(os.path.join(out_path, filename.replace(".txt", ".json")), output_dict)
if __name__ == "__main__":
openai.api_key = open_file('key_openai.txt')
# Define the directories where the text files and output JSON files are located
dir_path = "papers_txt/"
out_path = "papers_json/"
# Process the text files
process_text_files(dir_path, out_path)
| [] |
2024-01-10 | daveshap/ChatGPT_QA_Regenerative_Medicine | step03_read_papers.py | import re
import os
import json
import openai
from time import time, sleep
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8', errors='ignore') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def load_json(filepath):
with open(filepath, 'r', encoding='utf-8', errors='ignore') as infile:
return json.load(infile)
def save_json(filepath, payload):
with open(filepath, 'w', encoding='utf-8') as outfile:
json.dump(payload, outfile, ensure_ascii=False, sort_keys=True, indent=2)
def chatgpt_completion(messages, temp=0, model="gpt-4"):
max_retry = 7
retry = 0
while True:
try:
response = openai.ChatCompletion.create(model=model, messages=messages, temperature=temp)
text = response['choices'][0]['message']['content']
filename = 'chat_%s_mordin.txt' % time()
if not os.path.exists('chat_logs'):
os.makedirs('chat_logs')
save_file('chat_logs/%s' % filename, text)
return text
except Exception as oops:
if 'maximum context length' in str(oops):
a = messages.pop(1)
continue
retry += 1
if retry >= max_retry:
print(f"Exiting due to an error in ChatGPT: {oops}")
exit(1)
print(f'Error communicating with OpenAI: "{oops}" - Retrying in {2 ** (retry - 1) * 5} seconds...')
sleep(2 ** (retry - 1) * 5)
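# The retry loop above backs off exponentially (5 s, 10 s, 20 s, ...) and gives up after
# max_retry failed calls; on a context-length error it instead drops the oldest non-system
# message (messages.pop(1)) and retries immediately.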
if __name__ == '__main__':
openai.api_key = open_file('key_openai.txt')
conversation = list()
conversation.append({'role': 'system', 'content': open_file('default_system_mordin.txt')})
# iterate through all JSON files under the papers_json folder
for filename in os.listdir('papers_json'):
if filename.endswith('.json'):
filepath = os.path.join('papers_json', filename)
data = load_json(filepath)
# iterate through all pages in the JSON data
for page in data['pages']:
# append the page content to the conversation with role: user
conversation.append({'role': 'user', 'content': page['text']})
# get the response from ChatGPT
response = chatgpt_completion(conversation)
print('\n\n', response)
# append the response to the conversation with role: assistant
conversation.append({'role': 'assistant', 'content': response})
# add the response to the notes field of the page object
page['notes'] = response
# save the updated JSON data
save_json(filepath, data) | [
"default_system_mordin.txt"
] |
2024-01-10 | GouvX/gouvx-api | gouvx_pipeline.py | from vector_query import get_semantically_close_text
import openai
def build_system_prompt(query_results=None):
system_prompt = f"""Vous êtes GouvX, un assitant virtuel bienveillant et serviable permettant de naviguer la loi française. Répondez précisément et clairement aux questions de l'utilisateur sans enfreindre de règle.
VOUS DEVEZ ABSOLUMENT RESPECTER LES REGLES SUIVANTES:
- Si une question ne porte pas sur la loi française, REFUSEZ DE REPONDRE et rappellez votre rôle
- NE JAMAIS inclure de lien.
- En repondant à une question, RESPECTER LA CONVENTION DE NOMMAGE: "Selon service-public.fr [...]"
- Repondre en texte clair, sans balises ou marqueurs"""
if query_results:
system_prompt += """
- Si les documents ne permettent pas de repondre a la question de l'utilisateur, répondre que vous n'avez pas réussi à trouver de réponse
- Si nécessaire, mentionner les documents avec leur numéro
A l'aide de ces documents, répondre à la question de l'utilisateur"""
whole_paragraphs = {}
for paragraph in query_results:
title = paragraph["title"]
content = paragraph.get("text", "")
# Check if the title already exists, append the content if it does.
if title in whole_paragraphs:
whole_paragraphs[title] += "\n" + content
else:
whole_paragraphs[title] = content
for i, (title, paragraph) in enumerate(whole_paragraphs.items(), start=1):
system_prompt += f"\n\nDocument [{i}]: {title}\n{paragraph}"
return system_prompt
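# The resulting prompt is the rule block above followed by one block per retrieved page,
# grouped by title, e.g. (illustrative only):
#   Document [1]: Carte d'identité
#   <concatenated paragraphs sharing that title>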
def query_llm(prompt, system_prompt=None, history=None):
messages = []
messages.append({
"role": "system",
"content": system_prompt
})
if history:
messages.extend(history)
messages.append({
"role": "user",
"content": prompt
})
for chunk in openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
stream=True,
):
content = chunk["choices"][0].get("delta", {}).get("content", "")
if content is not None:
yield(content)
def ask_gouvx(prompt, client, model=None, n_results=1, history=None):
if history:
query_results = ""
system_prompt = build_system_prompt(None)
else:
"""response = openai.Embedding.create(
input=question,
model="text-embedding-ada-002"
)
custom_vector = response['data'][0]['embedding']
response = get_semantically_close_text(client, embedding=custom_vector)
"""
response = get_semantically_close_text(client, text=prompt)
if response and response["data"]["Get"]["ServicePublic"] is not None:
query_results = response["data"]["Get"]["ServicePublic"][:n_results]
else :
raise ValueError('The weaviate query returned no response')
system_prompt = build_system_prompt(query_results)
chatgpt_generator = query_llm(prompt, system_prompt=system_prompt, history=history)
return query_results, chatgpt_generator
| [
"\n- Si les documents ne permettent pas de repondre a la question de l'utilisateur, répondre que vous n'avez pas réussi à trouver de réponse\n- Si nécessaire, mentionner les documents avec leur numéro\n\nA l'aide de ces documents, répondre à la question de l'utilisateur",
"Vous êtes GouvX, un assitant virtuel bienveillant et serviable permettant de naviguer la loi française. Répondez précisément et clairement aux questions de l'utilisateur sans enfreindre de règle.\n \nVOUS DEVEZ ABSOLUMENT RESPECTER LES REGLES SUIVANTES:\n- Si une question ne porte pas sur la loi française, REFUSEZ DE REPONDRE et rappellez votre rôle\n- NE JAMAIS inclure de lien.\n- En repondant à une question, RESPECTER LA CONVENTION DE NOMMAGE: \"Selon service-public.fr [...]\"\n- Repondre en texte clair, sans balises ou marqueurs",
"\n\nDocument [PLACEHOLDER]: PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | 4GeeksAcademy/JSON-Bourne-Capstone | src~api~routes.py | """
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
from flask import Flask, request, jsonify, url_for, Blueprint, session
from api.models import db, User, Post, Favorites, Comment
from flask_jwt_extended import jwt_required, get_jwt_identity, create_access_token
import sys
import openai
import os
api = Blueprint('api', __name__)
app = Flask(__name__)
openai.api_key = ("APIKEYHERE") #stored securely in config.py which is in the gitignore list
openai.Model.list()
@api.route('/signup', methods=['POST'])
def signup():
# Retrieve request data
username = request.json.get('username')
password = request.json.get('password')
# confirm_password = request.json.get('confirm_password')
# Check if the email is already registered
if User.query.filter_by(username=username).first():
return jsonify(message='Username already registered'), 409
# if password != confirm_password:
# return jsonify(message='Passwords do not match'), 200
# Create a new user object
new_user = User(username=username, password=password)
try:
db.session.add(new_user)
db.session.commit()
except Exception as e:
db.session.rollback()
print(sys.exc_info())
return jsonify(message='Failed to register user'), 500
user_id = new_user.id
return jsonify(message='User registered successfully', user_id=user_id), 201
@api.route('/login', methods=['POST'])
def login():
username = request.json.get("username", None)
password = request.json.get("password", None)
# Perform authentication
user = User.query.filter_by(username=username).first()
if user is None or not password == user.password:
if user is None or not user.check_password(password):
return jsonify({"msg": "Incorrect email or password"}), 401
# Generate access token
access_token = create_access_token(identity=username)
print (access_token)
return jsonify(access_token=access_token, user_id=user.id)
@api.route('/generate_image', methods=['POST'])
def generate_image():
data = request.get_json()
prompt = data.get('prompt')
number = data.get('number', 1)
size = data.get('size', '1024x1024')
response_format = data.get('response_format', 'url') # Change response_format to 'url'
try:
response = openai.Image.create(
prompt=prompt,
n=number,
size=size,
response_format=response_format
)
urls = []
if response_format == "url":
urls = [data['url'] for data in response.data]
response_headers = {
'Access-Control-Allow-Methods': 'POST'
}
return jsonify(urls), 200, response_headers
except Exception as e:
return jsonify(error=str(e)), 500
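# Illustrative JSON body for the endpoint above (a sketch, not part of the original file):
#   {"prompt": "a watercolor fox", "number": 1, "size": "512x512", "response_format": "url"}
# The response is a JSON list of image URLs.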
@api.route('/comments', methods=['POST'])
def comments():
data = request.get_json()
print("I AM DATA COMMENTS",data)
text = data.get('text')
user_id = data.get('user_id')
post_id = data.get('post_id')
new_comment = Comment(
text=text,
user_id=user_id,
post_id=post_id,
)
db.session.add(new_comment)
db.session.commit()
return jsonify(new_comment.serialize()), 200
@api.route('/comments', methods=['GET'])
def get_comments():
comment_list= Comment.query.all()
all_comments= list(map(lambda comment:comment.serialize(),comment_list))
return jsonify(all_comments), 200
# @api.route('/users/<int:id>', methods=['GET'])
# @jwt_required()
# def get_user_(id):
# user = User.query.filter_by(id=id).first()
# if not user:
# return jsonify({'message': 'User not found'}), 404
# request_token = request.headers.get('Authorization', '').split('Bearer ')[1] if 'Authorization' in request.headers else None
# if 'access_token' not in session or session['access_token'] != request_token:
# if request_token:
# session['access_token'] = request_token
# return jsonify({'message': 'Invalid access token'}), 401
# return jsonify(user.serialize()), 200
@api.route('/posts', methods=['GET'])
def get_posts():
posts = Post.query.all()
serialized_posts=[]
for post in posts:
        serialized_posts.append(post.serialize())
return jsonify(serialized_posts), 200
@api.route('/posts', methods=['POST'])
@jwt_required()
def create_post():
data = request.get_json()
user_id = data.get('user_id')
post_id = data.get('post_id')
user = User.query.filter_by(id=user_id).first()
if not user:
return jsonify({'message': 'User not found'}), 404
post = Post(
title=data['title'],
content=data['content'],
author=user,
post_id=post_id
)
db.session.add(post)
db.session.commit()
return jsonify({'message': 'Post created successfully', 'post_id': post.id}), 200
# @api.route("/post_images", methods=["POST"])
# def create_post_image():
# image = request.files['file']
# post_id = request.form.get("post_id")
# response = uploader.upload(
# image,
# resource_type="image",
# folder="posts"
# )
# new_post_image = Image(
# post_id=post_id,
# url=response["secure_url"],
# )
# db.session.add(new_post_image)
# db.session.commit()
# return jsonify(new_post_image.serialize()), 201
@api.route('/single/<int:theid>', methods=['GET'])
def get_single(theid):
item = User.query.get(theid)
if not item:
return jsonify({'message': 'Item not found'}), 404
return jsonify({'item': item.serialize()}), 200
@api.route('/users/favorites', methods=['POST'])
def add_favorite():
data = request.get_json()
print(data)
user_id = data.get('user_id')
print (user_id)
# Check if user_id is provided and is an integer
if not user_id or not isinstance(user_id, int):
return jsonify({'message': 'Invalid user ID'}), 400
user = User.query.filter_by(id=user_id).first()
if not user:
return jsonify({'message': 'User not found'}), 404
post_id = data.get('post_id')
if not post_id or not isinstance(post_id, int):
return jsonify({'message': 'Invalid post ID'}), 400
favorite = Favorites(
user_id=user_id,
post_id=post_id,
)
db.session.add(favorite)
db.session.commit()
favorites = Favorites.query.filter_by(user_id=user_id).all() # Use .all() to get all favorites
# Serialize the list of favorites
favorites_data = [favorite.serialize() for favorite in favorites]
return jsonify({'message': 'Favorite added successfully', 'favorites': favorites_data}), 200
@api.route('/users/favorites/<int:id>', methods=['DELETE'])
def delete_favorite(id):
current_user_id = get_jwt_identity()
favorite = Favorites.query.get(id)
if not favorite:
return jsonify({'message': 'Favorite not found'}), 404
db.session.delete(favorite)
db.session.commit()
return jsonify({'message': 'Favorite deleted successfully'}), 200
@api.route('/logout', methods=['POST'])
@jwt_required()
def logout():
# Remove the stored access token from the session
session.pop('access_token', None)
return jsonify({'message': 'Logged out successfully'}), 200
@api.route('/hello', methods=['GET'])
@jwt_required()
def hello():
# Retrieve the username from the token
username = get_jwt_identity()
# Create the message with the username
message = f"Hello, {username}"
# Return the message as JSON response
return jsonify({'message': message}), 200
if __name__ == "__main__":
api.run()
| [] |
2024-01-10 | burpheart/Gepetto-ChatGPT | Gepetto-ChatGPT.py | import functools
import json
import idaapi
import ida_hexrays
import ida_kernwin
import idc
# import openai
import os
import re
import textwrap
import threading
from revChatGPT.revChatGPT import Chatbot
config = {
"email": "<YOUR_EMAIL>",
"password": "<YOUR_PASSWORD>"#,
#"session_token": "",
#Use session_token or email/password. But the session_token has a very short validity
#"proxy": "127.0.0.1:7890"
}
ZH_CN = True # 是否使用中文代码解释 # Use Chinese explain
# =============================================================================
# Setup the context menu and hotkey in IDA
# =============================================================================
class Gepetto_CHATPlugin(idaapi.plugin_t):
flags = 0
explain_action_name = "Gepetto_CHAT:explain_function_CHAT"
explain_menu_path = "Edit/Gepetto_CHAT/Explain function_CHAT"
rename_action_name = "Gepetto_CHAT:rename_function_CHAT"
rename_menu_path = "Edit/Gepetto_CHAT/Rename variables_CHAT"
wanted_name = 'Gepetto_CHAT'
wanted_hotkey = ''
comment = "Uses ChatGPT to enrich the decompiler's output"
help = "See usage instructions on GitHub"
menu = None
def init(self):
# Check whether the decompiler is available
if not ida_hexrays.init_hexrays_plugin():
return idaapi.PLUGIN_SKIP
# Function explaining action
explain_action = idaapi.action_desc_t(self.explain_action_name,
'Explain function_CHAT',
ExplainHandler(),
"Ctrl+Alt+G",
'Use ChatGPT to explain the currently selected function',
199)
idaapi.register_action(explain_action)
idaapi.attach_action_to_menu(self.explain_menu_path, self.explain_action_name, idaapi.SETMENU_APP)
# Variable renaming action
rename_action = idaapi.action_desc_t(self.rename_action_name,
'Rename variables_CHAT',
RenameHandler(),
"Ctrl+Alt+R",
"Use ChatGPT to rename this function's variables",
199)
idaapi.register_action(rename_action)
idaapi.attach_action_to_menu(self.rename_menu_path, self.rename_action_name, idaapi.SETMENU_APP)
# Register context menu actions
self.menu = ContextMenuHooks()
self.menu.hook()
return idaapi.PLUGIN_KEEP
def run(self, arg):
pass
def term(self):
idaapi.detach_action_from_menu(self.explain_menu_path, self.explain_action_name)
idaapi.detach_action_from_menu(self.rename_menu_path, self.rename_action_name)
if self.menu:
self.menu.unhook()
return
# -----------------------------------------------------------------------------
class ContextMenuHooks(idaapi.UI_Hooks):
def finish_populating_widget_popup(self, form, popup):
# Add actions to the context menu of the Pseudocode view
if idaapi.get_widget_type(form) == idaapi.BWN_PSEUDOCODE:
idaapi.attach_action_to_popup(form, popup, Gepetto_CHATPlugin.explain_action_name, "Gepetto_CHAT/")
idaapi.attach_action_to_popup(form, popup, Gepetto_CHATPlugin.rename_action_name, "Gepetto_CHAT/")
# -----------------------------------------------------------------------------
def comment_callback(address, view, response):
"""
Callback that sets a comment at the given address.
:param address: The address of the function to comment
:param view: A handle to the decompiler window
:param response: The comment to add
"""
    # Wrap the response to 80-character lines so it displays nicely in IDA.
response = "\n".join(textwrap.wrap(response, 80, replace_whitespace=False))
# Add the response as a comment in IDA.
idc.set_func_cmt(address, response, 0)
# Refresh the window so the comment is displayed properly
if view:
view.refresh_view(False)
print("ChatGPT query finished!")
# -----------------------------------------------------------------------------
class ExplainHandler(idaapi.action_handler_t):
"""
This handler is tasked with querying ChatGPT for an explanation of the
given function. Once the reply is received, it is added as a function
comment.
"""
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
if ZH_CN:
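            # Chinese prompt: asks the model to analyse the pseudocode, infer its likely usage
            # context and purpose, suggest a new name, and bracket the reply with GPTSTART/GPTEND.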
query_model_async(
"对下面的C语言伪代码函数进行分析 推测关于该函数的使用环境和预期目的详细的函数功能等信息 并为这个函数取一个新的名字 不要返回其他的内容 (开始前加上GPTSTART 结束后加上GPTEND字符串)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v))
else:
query_model_async(
"Can you explain what the following C function does and suggest a better name for it?(Add GPTSTART before the beginning of the conversation and GPTEND after the end.)\n"
+ str(decompiler_output),
functools.partial(comment_callback, address=idaapi.get_screen_ea(), view=v))
return 1
# This action is always available.
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# -----------------------------------------------------------------------------
def rename_callback(address, view, response):
"""
Callback that extracts a JSON array of old names and new names from the
response and sets them in the pseudocode.
:param address: The address of the function to work on
:param view: A handle to the decompiler window
:param response: The response from ChatGPT
"""
j = re.search(r"\{[^}]*?\}", response)
if not j:
print(f"Error: couldn't extract a response from ChatGPT's output:\n{response}")
return
try:
names = json.loads(j.group(0))
except json.decoder.JSONDecodeError:
print(f"The data returned by the model cannot be parsed. Asking the model to fix it...")
query_model_async("Please fix the following JSON document:\n" + j.group(0),
functools.partial(rename_callback, address=idaapi.get_screen_ea(), view=view))
return
# The rename function needs the start address of the function
function_addr = idaapi.get_func(address).start_ea
replaced = []
for n in names:
if ida_hexrays.rename_lvar(function_addr, n, names[n]):
replaced.append(n)
# Update possible names left in the function comment
comment = idc.get_func_cmt(address, 0)
if comment and len(replaced) > 0:
for n in replaced:
comment = re.sub(r'\b%s\b' % n, names[n], comment)
idc.set_func_cmt(address, comment, 0)
# Refresh the window to show the new names
if view:
view.refresh_view(True)
print(f"ChatGPT query finished! {len(replaced)} variable(s) renamed.")
# -----------------------------------------------------------------------------
class RenameHandler(idaapi.action_handler_t):
"""
This handler requests new variable names from ChatGPT and updates the
decompiler's output.
"""
def __init__(self):
idaapi.action_handler_t.__init__(self)
def activate(self, ctx):
decompiler_output = ida_hexrays.decompile(idaapi.get_screen_ea())
v = ida_hexrays.get_widget_vdui(ctx.widget)
query_model_async(
"Analyze the following C function. Suggest better variable names, reply with a JSON array where keys are the original names and values are the proposed names. Do not explain anything, only print the JSON dictionary(Add GPTSTART before the beginning of the reply and GPTEND after the end.):\n" + str(
decompiler_output),
functools.partial(rename_callback, address=idaapi.get_screen_ea(), view=v))
return 1
# This action is always available.
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
# =============================================================================
# ChatGPT interaction
# =============================================================================
def query_model(query, cb):
"""
Function which sends a query to ChatGPT and calls a callback when the response is available.
Blocks until the response is received
:param query: The request to send to ChatGPT
    :param cb: The function to which the response will be passed.
"""
try:
chatbot = Chatbot(config, conversation_id=None)
response = chatbot.get_chat_response(query)['message']
if response.find("GPTSTART") == -1:
raise Exception("Unexpected response: " + response)
times = 1
retry = 0
data = response
print(f"response[" + str(times) + "]: " + response)
while response.find("GPTEND") == -1:
try:
times += 1
response = chatbot.get_chat_response("next")['message']
if response.find("GPTSTART") != -1:
times = 99
raise Exception("Duplicate responses appear: " + response)
print(f"response[" + str(times) + "]: " + response)
data += response
# print(message)
times = times - retry
retry = 0
except Exception as e:
if times > 5:
raise Exception("Request 5 times and still not return full results: " + response)
if retry > 3:
raise Exception("Retry 3 times and the request still fails: " + response)
retry += 1
ida_kernwin.execute_sync(functools.partial(cb, response=data.replace('GPTEND', '').replace('GPTSTART', '')),
ida_kernwin.MFF_WRITE)
except Exception as e:
print(f"General exception encountered while running the query: {str(e)}")
# -----------------------------------------------------------------------------
def query_model_async(query, cb):
"""
Function which sends a query to ChatGPT and calls a callback when the response is available.
:param query: The request to send to ChatGPT
    :param cb: The function to which the response will be passed.
"""
print("Request to ChatGPT sent...")
t = threading.Thread(target=query_model, args=[query, cb])
t.start()
# =============================================================================
# Main
# =============================================================================
def PLUGIN_ENTRY():
return Gepetto_CHATPlugin()
| [] |
2024-01-10 | SilveerDusk/calhacks | reflex~reflex-chat-main~webui~webui~state.py | import os, pyaudio, time, wave, librosa
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq, WhisperProcessor, WhisperForConditionalGeneration
from bark import generate_audio, SAMPLE_RATE
from scipy.io.wavfile import write as write_wav
import subprocess
import openai
import reflex as rx
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_base = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
class QA(rx.Base):
"""A question and answer pair."""
question: str
answer: str
DEFAULT_CHATS = {
"Intros": [],
}
class State(rx.State):
"""The app state."""
# A dict from the chat name to the list of questions and answers.
chats: dict[str, list[QA]] = DEFAULT_CHATS
# The current chat name.
current_chat = "Intros"
# The current question.
question: str
# Whether we are processing the question.
processing: bool = False
# The name of the new chat.
new_chat_name: str = ""
# Whether the drawer is open.
drawer_open: bool = False
# Whether the modal is open.
modal_open: bool = False
def create_chat(self):
"""Create a new chat."""
# Add the new chat to the list of chats.
self.current_chat = self.new_chat_name
self.chats[self.new_chat_name] = []
# Toggle the modal.
self.modal_open = False
def toggle_modal(self):
"""Toggle the new chat modal."""
self.modal_open = not self.modal_open
def toggle_drawer(self):
"""Toggle the drawer."""
self.drawer_open = not self.drawer_open
def delete_chat(self):
"""Delete the current chat."""
del self.chats[self.current_chat]
if len(self.chats) == 0:
self.chats = DEFAULT_CHATS
self.current_chat = list(self.chats.keys())[0]
self.toggle_drawer()
def set_chat(self, chat_name: str):
"""Set the name of the current chat.
Args:
chat_name: The name of the chat.
"""
self.current_chat = chat_name
self.toggle_drawer()
@rx.var
def chat_titles(self) -> list[str]:
"""Get the list of chat titles.
Returns:
The list of chat names.
"""
return list(self.chats.keys())
async def process_question(self, form_data: dict[str, str]):
"""Get the response from the API.
Args:
form_data: A dict with the current question.
"""
audio = pyaudio.PyAudio()
stream = audio.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
frames = []
tend = time.time() + 13
while time.time() < tend:
data=stream.read(1024)
frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()
sound_file = wave.open("inputAudio.wav", "wb")
sound_file.setnchannels(1)
sound_file.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
sound_file.setframerate(16000)
sound_file.writeframes(b''.join(frames))
sound_file.close()
processor = AutoProcessor.from_pretrained("openai/whisper-large-v2")
model = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-large-v2")
# load model and processor
processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
model.config.forced_decoder_ids = None
# load dummy dataset and read audio files
audio = librosa.load("inputAudio.wav", sr=16000)
input_features = processor(audio[0], sampling_rate=16000, return_tensors="pt").input_features
# generate token ids
predicted_ids = model.generate(input_features)
# decode token ids to text
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
with open("transcription.txt", "w") as f:
f.write(transcription[0])
# Check if the question is empty
if transcription[0] == "":
return
# Add the question to the list of questions.
qa = QA(question=transcription[0], answer="")
self.chats[self.current_chat].append(qa)
# Clear the input and start the processing.
self.processing = True
self.question = ""
yield
# Build the messages.
messages = [
{"role": "system", "content": "You are a friendly chatbot named AIfred."}
]
for qa in self.chats[self.current_chat]:
messages.append({"role": "user", "content": qa.question})
messages.append({"role": "assistant", "content": qa.answer})
# Remove the last mock answer.
messages = messages[:-1]
# Start a new session to answer the question.
session = openai.ChatCompletion.create(
model=os.getenv("OPENAI_MODEL", "gpt-3.5-turbo"),
messages=messages,
stream=True,
)
# Stream the results, yielding after every word.
for item in session:
if hasattr(item.choices[0].delta, "content"):
answer_text = item.choices[0].delta.content
self.chats[self.current_chat][-1].answer += answer_text
self.chats = self.chats
yield
# Toggle the processing flag.
audio_array = generate_audio(self.chats[self.current_chat][-1].answer)
write_wav("audio.wav", 16000, audio_array)
audio_file = "audio.wav"
subprocess.call(["afplay", audio_file])
self.processing = False
| [
"You are a friendly chatbot named AIfred."
] |
2024-01-10 | aasif057/ChatBot | backend.py | import pyttsx3
import speech_recognition as sr
import os
import openai
api_key = os.environ['OpenAI']
class ChatBot:
def __init__(self):
self.engine=pyttsx3.init('sapi5')
self.voices=self.engine.getProperty('voices')
self.engine.setProperty('voice',self.voices[0].id)
openai.api_key = api_key
self.r=sr.Recognizer()
def speak(self,audio):
self.engine.say(audio)
self.engine.runAndWait()
def close_speak(self):
self.engine.stop()
def take_commands(self):
with sr.Microphone() as Source:
print("Listening...")
self.r.pause_threshold=1
audio=self.r.listen(source = Source, timeout= None, phrase_time_limit= 5)
try:
print("Recognising...")
command=self.r.recognize_google(audio,language='en-in')
print(f"User asked for {command}")
return command
except Exception as e:
self.speak("Say That Again Please..")
command = self.take_commands()
return command
def get_response(self, user_input):
response = openai.Completion.create(
engine = "text-davinci-003",
prompt = user_input,
max_tokens= 4000,
temperature = 0.5
).choices[0].text
return response
# if __name__ == "__main__":
# chatbot = ChatBot()
# response = chatbot.get_response("Tell me about Wipro")
# print(response) | [] |
2024-01-10 | MuriloEduardo/new-poc-whatsapp-openia | openia.py | import os
import openai
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
history = []
def mount_messages(content, role="user"):
history.append({"role": role, "content": content})
return history
def extract_openia_response(response):
returned_response = response['choices'][0]['message']['content']
return returned_response
def get_ia_response(new_message):
messages = mount_messages(new_message)
try:
response = openai.ChatCompletion.create(
temperature=0.7,
messages=messages,
model="gpt-3.5-turbo",
)
extracted_response = extract_openia_response(response)
mount_messages(extracted_response, role="assistant")
return extracted_response
except Exception as e:
print(e)
| [] |
2024-01-10 | gventuri/pandas-ai | pandasai~smart_datalake~__init__.py | """
A smart dataframe class is a wrapper around the pandas/polars dataframe that allows you
to query it using natural language. It uses the LLMs to generate Python code from
natural language and then executes it on the dataframe.
Example:
```python
    import pandas as pd
    from pandasai.smart_dataframe import SmartDataframe
from pandasai.llm.openai import OpenAI
df = pd.read_csv("examples/data/Loan payments data.csv")
llm = OpenAI()
df = SmartDataframe(df, config={"llm": llm})
response = df.chat("What is the average loan amount?")
print(response)
# The average loan amount is $15,000.
```
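For multiple dataframes, the `SmartDatalake` class defined in this module can be used
in the same way (a minimal, illustrative sketch with made-up data):
    ```python
    import pandas as pd
    from pandasai import SmartDatalake
    from pandasai.llm.openai import OpenAI
    employees = pd.DataFrame({"EmployeeID": [1, 2], "Name": ["John", "Emma"]})
    salaries = pd.DataFrame({"EmployeeID": [1, 2], "Salary": [5000, 6000]})
    lake = SmartDatalake([employees, salaries], config={"llm": OpenAI()})
    print(lake.chat("Who has the highest salary?"))
    ```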
"""
import uuid
import logging
import os
from pandasai.constants import DEFAULT_CHART_DIRECTORY, DEFAULT_FILE_PERMISSIONS
from pandasai.helpers.skills_manager import SkillsManager
from pandasai.pipelines.pipeline_context import PipelineContext
from pandasai.skills import Skill
from pandasai.helpers.query_exec_tracker import QueryExecTracker
from ..pipelines.smart_datalake_chat.generate_smart_datalake_pipeline import (
GenerateSmartDatalakePipeline,
)
from pandasai.helpers.output_types import output_type_factory
from pandasai.helpers.viz_library_types import viz_lib_type_factory
from pandasai.responses.context import Context
from pandasai.responses.response_parser import ResponseParser
from ..llm.base import LLM
from ..llm.langchain import LangchainLLM
from ..helpers.logger import Logger
from ..helpers.cache import Cache
from ..helpers.memory import Memory
from ..schemas.df_config import Config
from ..config import load_config
from ..prompts.base import AbstractPrompt
from ..prompts.correct_error_prompt import CorrectErrorPrompt
from typing import Union, List, Any, Optional
from ..helpers.code_manager import CodeManager
from ..helpers.df_info import DataFrameType
from ..helpers.path import find_project_root
from ..helpers.viz_library_types.base import VisualizationLibrary
class SmartDatalake:
_dfs: List[DataFrameType]
_config: Union[Config, dict]
_llm: LLM
_cache: Cache = None
_logger: Logger
_last_prompt_id: uuid.UUID
_conversation_id: uuid.UUID
_code_manager: CodeManager
_memory: Memory
_skills: SkillsManager
_instance: str
_query_exec_tracker: QueryExecTracker
_last_code_generated: str = None
_last_result: str = None
_last_error: str = None
_viz_lib: str = None
def __init__(
self,
dfs: List[Union[DataFrameType, Any]],
config: Optional[Union[Config, dict]] = None,
logger: Optional[Logger] = None,
memory: Optional[Memory] = None,
cache: Optional[Cache] = None,
):
"""
Args:
dfs (List[Union[DataFrameType, Any]]): List of dataframes to be used
config (Union[Config, dict], optional): Config to be used. Defaults to None.
logger (Logger, optional): Logger to be used. Defaults to None.
"""
self._load_config(config)
self.initialize()
if logger:
self.logger = logger
else:
self.logger = Logger(
save_logs=self._config.save_logs, verbose=self._config.verbose
)
self._load_dfs(dfs)
self._memory = memory or Memory()
self._code_manager = CodeManager(
dfs=self._dfs,
config=self._config,
logger=self.logger,
)
self._skills = SkillsManager()
if cache:
self._cache = cache
elif self._config.enable_cache:
self._cache = Cache()
context = Context(self._config, self.logger, self.engine)
if self._config.response_parser:
self._response_parser = self._config.response_parser(context)
else:
self._response_parser = ResponseParser(context)
if self._config.data_viz_library:
self._viz_lib = self._config.data_viz_library.value
self._conversation_id = uuid.uuid4()
self._instance = self.__class__.__name__
self._query_exec_tracker = QueryExecTracker(
server_config=self._config.log_server,
)
def set_instance_type(self, type_: str):
self._instance = type_
def is_related_query(self, flag: bool):
self._query_exec_tracker.set_related_query(flag)
def initialize(self):
"""Initialize the SmartDatalake, create auxiliary directories.
        If the 'save_charts' option is enabled, create the 'exports/charts'
        directory in case it doesn't exist.
        If the 'enable_cache' option is enabled, create the 'cache' directory
        in case it doesn't exist.
Returns:
None
"""
if self._config.save_charts:
charts_dir = self._config.save_charts_path
# Add project root path if save_charts_path is default
if self._config.save_charts_path == DEFAULT_CHART_DIRECTORY:
try:
charts_dir = os.path.join(
(find_project_root()), self._config.save_charts_path
)
self._config.save_charts_path = charts_dir
except ValueError:
charts_dir = os.path.join(
os.getcwd(), self._config.save_charts_path
)
os.makedirs(charts_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True)
if self._config.enable_cache:
try:
cache_dir = os.path.join((find_project_root()), "cache")
except ValueError:
cache_dir = os.path.join(os.getcwd(), "cache")
os.makedirs(cache_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True)
def _load_dfs(self, dfs: List[Union[DataFrameType, Any]]):
"""
Load all the dataframes to be used in the smart datalake.
Args:
dfs (List[Union[DataFrameType, Any]]): List of dataframes to be used
"""
from ..smart_dataframe import SmartDataframe
smart_dfs = []
for df in dfs:
if not isinstance(df, SmartDataframe):
smart_dfs.append(
SmartDataframe(df, config=self._config, logger=self.logger)
)
else:
smart_dfs.append(df)
self._dfs = smart_dfs
def _load_config(self, config: Union[Config, dict]):
"""
Load a config to be used to run the queries.
Args:
config (Union[Config, dict]): Config to be used
"""
config = load_config(config)
if config.get("llm"):
self._load_llm(config["llm"])
config["llm"] = self._llm
if config.get("data_viz_library"):
self._load_data_viz_library(config["data_viz_library"])
config["data_viz_library"] = self._data_viz_library
self._config = Config(**config)
def _load_llm(self, llm: LLM):
"""
Load a LLM to be used to run the queries.
Check if it is a PandasAI LLM or a Langchain LLM.
If it is a Langchain LLM, wrap it in a PandasAI LLM.
Args:
llm (object): LLMs option to be used for API access
Raises:
BadImportError: If the LLM is a Langchain LLM but the langchain package
is not installed
"""
if hasattr(llm, "_llm_type"):
llm = LangchainLLM(llm)
self._llm = llm
def _load_data_viz_library(self, data_viz_library: str):
"""
Load the appropriate instance for viz library type to use.
Args:
data_viz_library (enum): TODO
Raises:
TODO
"""
self._data_viz_library = VisualizationLibrary.DEFAULT.value
if data_viz_library in (item.value for item in VisualizationLibrary):
self._data_viz_library = data_viz_library
def add_skills(self, *skills: Skill):
"""
Add Skills to PandasAI
"""
self._skills.add_skills(*skills)
def _assign_prompt_id(self):
"""Assign a prompt ID"""
self._last_prompt_id = uuid.uuid4()
if self.logger:
self.logger.log(f"Prompt ID: {self._last_prompt_id}")
def _get_prompt(
self,
key: str,
default_prompt: AbstractPrompt,
default_values: Optional[dict] = None,
) -> AbstractPrompt:
"""
Return a prompt by key.
Args:
key (str): The key of the prompt
default_prompt (Type[AbstractPrompt]): The default prompt to use
default_values (Optional[dict], optional): The default values to use for the
prompt. Defaults to None.
Returns:
AbstractPrompt: The prompt
"""
if default_values is None:
default_values = {}
custom_prompt = self._config.custom_prompts.get(key)
prompt = custom_prompt or default_prompt
# set default values for the prompt
prompt.set_config(self._config)
if "dfs" not in default_values:
prompt.set_var("dfs", self._dfs)
if "conversation" not in default_values:
prompt.set_var("conversation", self._memory.get_conversation())
if "prev_conversation" not in default_values:
prompt.set_var(
"prev_conversation", self._memory.get_previous_conversation()
)
if "last_message" not in default_values:
prompt.set_var("last_message", self._memory.get_last_message())
# Adds the skills to prompt if exist else display nothing
skills_prompt = self._skills.prompt_display()
prompt.set_var("skills", skills_prompt if skills_prompt is not None else "")
for key, value in default_values.items():
prompt.set_var(key, value)
self.logger.log(f"Using prompt: {prompt}")
return prompt
def chat(self, query: str, output_type: Optional[str] = None):
"""
Run a query on the dataframe.
Args:
query (str): Query to run on the dataframe
output_type (Optional[str]): Add a hint for LLM which
type should be returned by `analyze_data()` in generated
code. Possible values: "number", "dataframe", "plot", "string":
* number - specifies that user expects to get a number
as a response object
* dataframe - specifies that user expects to get
pandas/polars dataframe as a response object
* plot - specifies that user expects LLM to build
a plot
* string - specifies that user expects to get text
as a response object
If none `output_type` is specified, the type can be any
of the above or "text".
Raises:
ValueError: If the query is empty
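        Example (illustrative only):
            >>> dl.chat("What is the average gdp?", output_type="number")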
"""
pipeline_context = self.prepare_context_for_smart_datalake_pipeline(
query=query, output_type=output_type
)
try:
result = GenerateSmartDatalakePipeline(pipeline_context, self.logger).run()
except Exception as exception:
self.last_error = str(exception)
self._query_exec_tracker.success = False
self._query_exec_tracker.publish()
return (
"Unfortunately, I was not able to answer your question, "
"because of the following error:\n"
f"\n{exception}\n"
)
self.update_intermediate_value_post_pipeline_execution(pipeline_context)
# publish query tracker
self._query_exec_tracker.publish()
return result
def _validate_output(self, result: dict, output_type: Optional[str] = None):
"""
Validate the output of the code execution.
Args:
result (Any): Result of executing the code
output_type (Optional[str]): Add a hint for LLM which
type should be returned by `analyze_data()` in generated
code. Possible values: "number", "dataframe", "plot", "string":
* number - specifies that user expects to get a number
as a response object
* dataframe - specifies that user expects to get
pandas/polars dataframe as a response object
* plot - specifies that user expects LLM to build
a plot
* string - specifies that user expects to get text
as a response object
If none `output_type` is specified, the type can be any
of the above or "text".
Raises:
(ValueError): If the output is not valid
"""
output_type_helper = output_type_factory(output_type, logger=self.logger)
result_is_valid, validation_logs = output_type_helper.validate(result)
if result_is_valid:
self._query_exec_tracker.add_step(
{
"type": "Validating Output",
"success": True,
"message": "Output Validation Successful",
}
)
else:
self.logger.log("\n".join(validation_logs), level=logging.WARNING)
self._query_exec_tracker.add_step(
{
"type": "Validating Output",
"success": False,
"message": "Output Validation Failed",
}
)
raise ValueError("Output validation failed")
def _get_viz_library_type(self) -> str:
"""
Get the visualization library type based on the configured library.
Returns:
(str): Visualization library type
"""
viz_lib_helper = viz_lib_type_factory(self._viz_lib, logger=self.logger)
return viz_lib_helper.template_hint
def prepare_context_for_smart_datalake_pipeline(
self, query: str, output_type: Optional[str] = None
) -> PipelineContext:
"""
Prepare Pipeline Context to initiate Smart Data Lake Pipeline.
Args:
query (str): Query to run on the dataframe
output_type (Optional[str]): Add a hint for LLM which
type should be returned by `analyze_data()` in generated
code. Possible values: "number", "dataframe", "plot", "string":
* number - specifies that user expects to get a number
as a response object
* dataframe - specifies that user expects to get
pandas/polars dataframe as a response object
* plot - specifies that user expects LLM to build
a plot
* string - specifies that user expects to get text
as a response object
If none `output_type` is specified, the type can be any
of the above or "text".
Returns:
PipelineContext: The Pipeline Context to be used by Smart Data Lake Pipeline.
"""
self._query_exec_tracker.start_new_track()
self.logger.log(f"Question: {query}")
self.logger.log(f"Running PandasAI with {self._llm.type} LLM...")
self._assign_prompt_id()
self._query_exec_tracker.add_query_info(
self._conversation_id, self._instance, query, output_type
)
self._query_exec_tracker.add_dataframes(self._dfs)
self._memory.add(query, True)
output_type_helper = output_type_factory(output_type, logger=self.logger)
viz_lib_helper = viz_lib_type_factory(self._viz_lib, logger=self.logger)
pipeline_context = PipelineContext(
dfs=self.dfs,
config=self.config,
memory=self.memory,
cache=self.cache,
query_exec_tracker=self._query_exec_tracker,
)
pipeline_context.add_intermediate_value("is_present_in_cache", False)
pipeline_context.add_intermediate_value(
"output_type_helper", output_type_helper
)
pipeline_context.add_intermediate_value("viz_lib_helper", viz_lib_helper)
pipeline_context.add_intermediate_value(
"last_code_generated", self._last_code_generated
)
pipeline_context.add_intermediate_value("get_prompt", self._get_prompt)
pipeline_context.add_intermediate_value("last_prompt_id", self.last_prompt_id)
pipeline_context.add_intermediate_value("skills", self._skills)
pipeline_context.add_intermediate_value("code_manager", self._code_manager)
pipeline_context.add_intermediate_value(
"response_parser", self._response_parser
)
return pipeline_context
def update_intermediate_value_post_pipeline_execution(
self, pipeline_context: PipelineContext
):
"""
After the Smart Data Lake Pipeline has executed, update values of Smart Data Lake object.
Args:
pipeline_context (PipelineContext): Pipeline Context after the Smart Data Lake pipeline execution
"""
self._last_code_generated = pipeline_context.get_intermediate_value(
"last_code_generated"
)
self._last_result = pipeline_context.get_intermediate_value("last_result")
def _retry_run_code(self, code: str, e: Exception) -> List:
"""
A method to retry the code execution with error correction framework.
Args:
code (str): A python code
e (Exception): An exception
Returns (str): A python code
"""
self.logger.log(f"Failed with error: {e}. Retrying", logging.ERROR)
default_values = {
"engine": self._dfs[0].engine,
"code": code,
"error_returned": e,
}
error_correcting_instruction = self._get_prompt(
"correct_error",
default_prompt=CorrectErrorPrompt(),
default_values=default_values,
)
return self._llm.generate_code(error_correcting_instruction)
def clear_memory(self):
"""
Clears the memory
"""
self._memory.clear()
self._conversation_id = uuid.uuid4()
@property
def engine(self):
return self._dfs[0].engine
@property
def last_prompt(self):
return self._llm.last_prompt
@property
def last_prompt_id(self) -> uuid.UUID:
"""Return the id of the last prompt that was run."""
if self._last_prompt_id is None:
raise ValueError("Pandas AI has not been run yet.")
return self._last_prompt_id
@property
def logs(self):
return self.logger.logs
@property
def logger(self):
return self._logger
@logger.setter
def logger(self, logger):
self._logger = logger
@property
def config(self):
return self._config
@property
def cache(self):
return self._cache
@property
def verbose(self):
return self._config.verbose
@verbose.setter
def verbose(self, verbose: bool):
self._config.verbose = verbose
self._logger.verbose = verbose
@property
def save_logs(self):
return self._config.save_logs
@save_logs.setter
def save_logs(self, save_logs: bool):
self._config.save_logs = save_logs
self._logger.save_logs = save_logs
@property
def enforce_privacy(self):
return self._config.enforce_privacy
@enforce_privacy.setter
def enforce_privacy(self, enforce_privacy: bool):
self._config.enforce_privacy = enforce_privacy
@property
def enable_cache(self):
return self._config.enable_cache
@enable_cache.setter
def enable_cache(self, enable_cache: bool):
self._config.enable_cache = enable_cache
if enable_cache:
if self.cache is None:
self._cache = Cache()
else:
self._cache = None
@property
def use_error_correction_framework(self):
return self._config.use_error_correction_framework
@use_error_correction_framework.setter
def use_error_correction_framework(self, use_error_correction_framework: bool):
self._config.use_error_correction_framework = use_error_correction_framework
@property
def custom_prompts(self):
return self._config.custom_prompts
@custom_prompts.setter
def custom_prompts(self, custom_prompts: dict):
self._config.custom_prompts = custom_prompts
@property
def save_charts(self):
return self._config.save_charts
@save_charts.setter
def save_charts(self, save_charts: bool):
self._config.save_charts = save_charts
@property
def save_charts_path(self):
return self._config.save_charts_path
@save_charts_path.setter
def save_charts_path(self, save_charts_path: str):
self._config.save_charts_path = save_charts_path
@property
def custom_whitelisted_dependencies(self):
return self._config.custom_whitelisted_dependencies
@custom_whitelisted_dependencies.setter
def custom_whitelisted_dependencies(
self, custom_whitelisted_dependencies: List[str]
):
self._config.custom_whitelisted_dependencies = custom_whitelisted_dependencies
@property
def max_retries(self):
return self._config.max_retries
@max_retries.setter
def max_retries(self, max_retries: int):
self._config.max_retries = max_retries
@property
def llm(self):
return self._llm
@llm.setter
def llm(self, llm: LLM):
self._load_llm(llm)
@property
def last_code_generated(self):
return self._last_code_generated
@last_code_generated.setter
def last_code_generated(self, last_code_generated: str):
self._last_code_generated = last_code_generated
@property
def last_code_executed(self):
return self._code_manager.last_code_executed
@property
def last_result(self):
return self._last_result
@last_result.setter
def last_result(self, last_result: str):
self._last_result = last_result
@property
def last_error(self):
return self._last_error
@last_error.setter
def last_error(self, last_error: str):
self._last_error = last_error
@property
def dfs(self):
return self._dfs
@property
def memory(self):
return self._memory
@property
def instance(self):
return self._instance
@property
def last_query_log_id(self):
return self._query_exec_tracker.last_log_id
| [] |
2024-01-10 | gventuri/pandas-ai | examples~from_csv.py | """Example of using PandasAI with a CSV file."""
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
llm = OpenAI()
df = SmartDataframe("examples/data/Loan payments data.csv", config={"llm": llm})
response = df.chat("How many loans are from men and have been paid off?")
print(response)
# Output: 247 loans have been paid off by men.
| [
"How many loans are from men and have been paid off?"
] |
2024-01-10 | gventuri/pandas-ai | examples~sql_direct_config.py | """Example of using PandasAI with a CSV file."""
from pandasai import SmartDatalake
from pandasai.llm import OpenAI
from pandasai.connectors import PostgreSQLConnector
from pandasai.smart_dataframe import SmartDataframe
# With a PostgreSQL database
order = PostgreSQLConnector(
config={
"host": "localhost",
"port": 5432,
"database": "testdb",
"username": "postgres",
"password": "123456",
"table": "orders",
}
)
order_details = PostgreSQLConnector(
config={
"host": "localhost",
"port": 5432,
"database": "testdb",
"username": "postgres",
"password": "123456",
"table": "order_details",
}
)
products = PostgreSQLConnector(
config={
"host": "localhost",
"port": 5432,
"database": "testdb",
"username": "postgres",
"password": "123456",
"table": "products",
}
)
llm = OpenAI("OPEN_API_KEY")
order_details_smart_df = SmartDataframe(
order_details,
config={"llm": llm, "direct_sql": True},
description="Contain user order details",
)
df = SmartDatalake(
[order_details_smart_df, order, products],
config={"llm": llm, "direct_sql": True},
)
response = df.chat("return orders with count of distinct products")
print(response)
| [
"return orders with count of distinct products"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_dataframe.py | """Example of using PandasAI with a Pandas DataFrame"""
import pandas as pd
from data.sample_dataframe import dataframe
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
df = SmartDataframe(df=pd.DataFrame(dataframe), config={"llm": llm})
response = df.chat("Calculate the sum of the gdp of north american countries")
print(response)
# Output: 20901884461056
| [
"Calculate the sum of the gdp of north american countries"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_airtable.py | from pandasai.connectors import AirtableConnector
from pandasai.llm import OpenAI
from pandasai import SmartDataframe
airtable_connectors = AirtableConnector(
config={
"api_key": "AIRTABLE_API_TOKEN",
"table": "AIRTABLE_TABLE_NAME",
"base_id": "AIRTABLE_BASE_ID",
"where": [
# this is optional and filters the data to
# reduce the size of the dataframe
["Status", "=", "In progress"]
],
}
)
llm = OpenAI("OPENAI_API_KEY")
df = SmartDataframe(airtable_connectors, config={"llm": llm})
response = df.chat("How many rows are there in data ?")
print(response)
| [
"How many rows are there in data ?"
] |
2024-01-10 | gventuri/pandas-ai | examples~show_chart.py | """Example of using PandasAI to generate a chart from a Pandas DataFrame"""
import pandas as pd
from data.sample_dataframe import dataframe
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
df = SmartDataframe(df, config={"llm": llm, "verbose": True})
response = df.chat(
"Plot the histogram of countries showing for each the gpd,"
" using different colors for each bar",
)
# Output: check out images/histogram-chart.png
| [
"Plot the histogram of countries showing for each the gpd, using different colors for each bar"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_excel.py | """Example of using PandasAI with am Excel file."""
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
llm = OpenAI()
df = SmartDataframe("examples/data/Loan payments data.xlsx", config={"llm": llm})
response = df.chat("How many loans are from men and have been paid off?")
print(response)
# Output: 247 loans have been paid off by men.
| [
"How many loans are from men and have been paid off?"
] |
2024-01-10 | gventuri/pandas-ai | examples~using_streamlit.py | """
Example of using displaying PandasAI charts in Streamlit
Usage:
streamlit run examples/using_streamlit.py
"""
import pandas as pd
from pandasai import SmartDatalake
from pandasai.llm import OpenAI
from pandasai.responses.streamlit_response import StreamlitResponse
employees_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
)
salaries_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
)
llm = OpenAI()
dl = SmartDatalake(
[employees_df, salaries_df],
config={"llm": llm, "verbose": True, "response_parser": StreamlitResponse},
)
dl.chat("Plot salaries against employee name")
| [
"Plot salaries against employee name"
] |
2024-01-10 | gventuri/pandas-ai | tests~llms~test_openai.py | """Unit tests for the openai LLM class"""
import openai
import pytest
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
from pandasai.llm import OpenAI
from pandasai.prompts import AbstractPrompt
class OpenAIObject:
def __init__(self, dictionary):
self.__dict__.update(dictionary)
class TestOpenAILLM:
"""Unit tests for the openai LLM class"""
@pytest.fixture
def prompt(self):
class MockAbstractPrompt(AbstractPrompt):
template: str = "instruction"
return MockAbstractPrompt()
def test_type_without_token(self):
with pytest.raises(APIKeyNotFoundError):
OpenAI().type
def test_type_with_token(self):
assert OpenAI(api_token="test").type == "openai"
def test_proxy(self):
proxy = "http://proxy.mycompany.com:8080"
client = OpenAI(api_token="test", openai_proxy=proxy)
assert client.openai_proxy == proxy
assert openai.proxy["http"] == proxy
assert openai.proxy["https"] == proxy
def test_params_setting(self):
llm = OpenAI(
api_token="test",
model="gpt-3.5-turbo",
temperature=0.5,
max_tokens=50,
top_p=1.0,
frequency_penalty=2.0,
presence_penalty=3.0,
stop=["\n"],
)
assert llm.model == "gpt-3.5-turbo"
assert llm.temperature == 0.5
assert llm.max_tokens == 50
assert llm.top_p == 1.0
assert llm.frequency_penalty == 2.0
assert llm.presence_penalty == 3.0
assert llm.stop == ["\n"]
def test_completion(self, mocker):
expected_text = "This is the generated text."
expected_response = OpenAIObject(
{
"choices": [{"text": expected_text}],
"usage": {
"prompt_tokens": 2,
"completion_tokens": 1,
"total_tokens": 3,
},
"model": "gpt-35-turbo",
}
)
openai = OpenAI(api_token="test")
mocker.patch.object(openai, "completion", return_value=expected_response)
result = openai.completion("Some prompt.")
openai.completion.assert_called_once_with("Some prompt.")
assert result == expected_response
def test_chat_completion(self, mocker):
openai = OpenAI(api_token="test")
expected_response = OpenAIObject(
{
"choices": [
{
"text": "Hello, how can I help you today?",
"index": 0,
"logprobs": None,
"finish_reason": "stop",
"start_text": "",
}
]
}
)
mocker.patch.object(openai, "chat_completion", return_value=expected_response)
result = openai.chat_completion("Hi")
openai.chat_completion.assert_called_once_with("Hi")
assert result == expected_response
def test_call_with_unsupported_model(self, prompt):
with pytest.raises(
UnsupportedModelError,
match=(
"Unsupported model: The model 'not a model' doesn't exist "
"or is not supported yet."
),
):
llm = OpenAI(api_token="test", model="not a model")
llm.call(instruction=prompt)
def test_call_supported_completion_model(self, mocker, prompt):
openai = OpenAI(api_token="test", model="gpt-3.5-turbo-instruct")
mocker.patch.object(openai, "completion", return_value="response")
result = openai.call(instruction=prompt)
assert result == "response"
def test_call_supported_chat_model(self, mocker, prompt):
openai = OpenAI(api_token="test", model="gpt-4")
mocker.patch.object(openai, "chat_completion", return_value="response")
result = openai.call(instruction=prompt)
assert result == "response"
def test_call_finetuned_model(self, mocker, prompt):
openai = OpenAI(api_token="test", model="ft:gpt-3.5-turbo:my-org:custom_suffix:id")
mocker.patch.object(openai, "chat_completion", return_value="response")
result = openai.call(instruction=prompt)
assert result == "response"
| [
"instruction"
] |
2024-01-10 | gventuri/pandas-ai | examples~using_workspace_env.py | import os
import pandas as pd
from pandasai import Agent
from pandasai.llm.openai import OpenAI
from pandasai.schemas.df_config import Config
employees_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
salaries_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
employees_df = pd.DataFrame(employees_data)
salaries_df = pd.DataFrame(salaries_data)
os.environ["PANDASAI_WORKSPACE"] = "workspace dir path"
llm = OpenAI("YOUR_API_KEY")
config__ = {"llm": llm, "save_charts": False}
agent = Agent(
[employees_df, salaries_df],
config=Config(**config__),
memory_size=10,
)
# Chat with the agent
response = agent.chat("plot salary against department?")
print(response)
| [
"plot salary against department?"
] |
2024-01-10 | gventuri/pandas-ai | tests~test_smartdatalake.py | """Unit tests for the SmartDatalake class"""
import os
import sys
from typing import Optional
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from pandasai import SmartDataframe, SmartDatalake
from pandasai.connectors.base import SQLConnectorConfig
from pandasai.connectors.sql import PostgreSQLConnector, SQLConnector
from pandasai.helpers.code_manager import CodeManager
from pandasai.llm.fake import FakeLLM
from pandasai.constants import DEFAULT_FILE_PERMISSIONS
from langchain import OpenAI
class TestSmartDatalake:
"""Unit tests for the SmartDatlake class"""
@pytest.fixture
def llm(self, output: Optional[str] = None):
return FakeLLM(output=output)
@pytest.fixture
def sample_df(self):
return pd.DataFrame(
{
"country": [
"United States",
"United Kingdom",
"France",
"Germany",
"Italy",
"Spain",
"Canada",
"Australia",
"Japan",
"China",
],
"gdp": [
19294482071552,
2891615567872,
2411255037952,
3435817336832,
1745433788416,
1181205135360,
1607402389504,
1490967855104,
4380756541440,
14631844184064,
],
"happiness_index": [
6.94,
7.16,
6.66,
7.07,
6.38,
6.4,
7.23,
7.22,
5.87,
5.12,
],
}
)
@pytest.fixture
@patch("pandasai.connectors.sql.create_engine", autospec=True)
def sql_connector(self, create_engine):
# Define your ConnectorConfig instance here
self.config = SQLConnectorConfig(
dialect="mysql",
driver="pymysql",
username="your_username",
password="your_password",
host="your_host",
port=443,
database="your_database",
table="your_table",
where=[["column_name", "=", "value"]],
).dict()
# Create an instance of SQLConnector
return SQLConnector(self.config)
@pytest.fixture
@patch("pandasai.connectors.sql.create_engine", autospec=True)
def pgsql_connector(self, create_engine):
# Define your ConnectorConfig instance here
self.config = SQLConnectorConfig(
dialect="mysql",
driver="pymysql",
username="your_username",
password="your_password",
host="your_host",
port=443,
database="your_database",
table="your_table",
where=[["column_name", "=", "value"]],
).dict()
# Create an instance of SQLConnector
return PostgreSQLConnector(self.config)
@pytest.fixture
def smart_dataframe(self, llm, sample_df):
return SmartDataframe(sample_df, config={"llm": llm, "enable_cache": False})
@pytest.fixture
def smart_datalake(self, smart_dataframe: SmartDataframe):
return smart_dataframe.lake
def test_load_llm_with_pandasai_llm(self, smart_datalake: SmartDatalake, llm):
smart_datalake._llm = None
assert smart_datalake._llm is None
smart_datalake._load_llm(llm)
assert smart_datalake._llm == llm
def test_load_llm_with_langchain_llm(self, smart_datalake: SmartDatalake, llm):
langchain_llm = OpenAI(openai_api_key="fake_key")
smart_datalake._llm = None
assert smart_datalake._llm is None
smart_datalake._load_llm(langchain_llm)
assert smart_datalake._llm._langchain_llm == langchain_llm
@patch.object(
CodeManager,
"execute_code",
return_value={
"type": "string",
"value": "There are 10 countries in the dataframe.",
},
)
def test_last_result_is_saved(self, _mocked_method, smart_datalake: SmartDatalake):
assert smart_datalake.last_result is None
_mocked_method.__name__ = "execute_code"
smart_datalake.chat("How many countries are in the dataframe?")
assert smart_datalake.last_result == {
"type": "string",
"value": "There are 10 countries in the dataframe.",
}
@patch.object(
CodeManager,
"execute_code",
return_value={
"type": "string",
"value": "There are 10 countries in the dataframe.",
},
)
@patch("pandasai.helpers.query_exec_tracker.QueryExecTracker.publish")
def test_query_tracker_publish_called_in_chat_method(
self, mock_query_tracker_publish, _mocked_method, smart_datalake: SmartDatalake
):
assert smart_datalake.last_result is None
_mocked_method.__name__ = "execute_code"
smart_datalake.chat("How many countries are in the dataframe?")
mock_query_tracker_publish.assert_called()
def test_retry_on_error_with_single_df(
self, smart_datalake: SmartDatalake, smart_dataframe: SmartDataframe
):
code = """result = 'Hello World'"""
smart_dataframe._get_sample_head = Mock(
return_value=pd.DataFrame(
{
"country": ["China", "Japan", "Spain"],
"gdp": [654881226, 9009692259, 8446903488],
"happiness_index": [6.66, 7.16, 6.38],
}
)
)
smart_datalake._retry_run_code(
code=code,
e=Exception("Test error"),
)
last_prompt = smart_datalake.last_prompt
if sys.platform.startswith("win"):
last_prompt = last_prompt.replace("\r\n", "\n")
assert (
last_prompt
== """<dataframe>
dfs[0]:10x3
country,gdp,happiness_index
China,654881226,6.66
Japan,9009692259,7.16
Spain,8446903488,6.38
</dataframe>
The user asked the following question:
You generated this python code:
result = 'Hello World'
It fails with the following error:
Test error
Fix the python code above and return the new python code:""" # noqa: E501
)
@patch("os.makedirs")
def test_initialize_with_cache(self, mock_makedirs, smart_datalake):
# Modify the smart_datalake's configuration
smart_datalake.config.save_charts = True
smart_datalake.config.enable_cache = True
# Call the initialize method
smart_datalake.initialize()
# Assertions for enabling cache
cache_dir = os.path.join(os.getcwd(), "cache")
mock_makedirs.assert_any_call(
cache_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True
)
# Assertions for saving charts
charts_dir = os.path.join(os.getcwd(), smart_datalake.config.save_charts_path)
mock_makedirs.assert_any_call(
charts_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True
)
@patch("os.makedirs")
def test_initialize_without_cache(self, mock_makedirs, smart_datalake):
# Modify the smart_datalake's configuration
smart_datalake.config.save_charts = True
smart_datalake.config.enable_cache = False
# Call the initialize method
smart_datalake.initialize()
# Assertions for saving charts
charts_dir = os.path.join(os.getcwd(), smart_datalake.config.save_charts_path)
mock_makedirs.assert_called_once_with(
charts_dir, mode=DEFAULT_FILE_PERMISSIONS, exist_ok=True
)
def test_validate_true_direct_sql_with_non_connector(self, llm, sample_df):
# raise exception with non connector
SmartDatalake(
[sample_df],
config={"llm": llm, "enable_cache": False, "direct_sql": True},
)
def test_validate_direct_sql_with_connector(self, llm, sql_connector):
        # no exception is raised using a single connector
SmartDatalake(
[sql_connector],
config={"llm": llm, "enable_cache": False, "direct_sql": True},
)
def test_validate_false_direct_sql_with_connector(self, llm, sql_connector):
        # no exception is raised using a single connector
SmartDatalake(
[sql_connector],
config={"llm": llm, "enable_cache": False, "direct_sql": False},
)
def test_validate_false_direct_sql_with_two_different_connector(
self, llm, sql_connector, pgsql_connector
):
        # no exception is raised using two different connectors
SmartDatalake(
[sql_connector, pgsql_connector],
config={"llm": llm, "enable_cache": False, "direct_sql": False},
)
| [
"How many countries are in the dataframe?",
"\r\n",
"\n"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_sql.py | """Example of using PandasAI with a CSV file."""
from pandasai import SmartDatalake
from pandasai.llm import OpenAI
from pandasai.connectors import MySQLConnector, PostgreSQLConnector, SqliteConnector
# With a MySQL database
loan_connector = MySQLConnector(
config={
"host": "localhost",
"port": 3306,
"database": "mydb",
"username": "root",
"password": "root",
"table": "loans",
"where": [
# this is optional and filters the data to
# reduce the size of the dataframe
["loan_status", "=", "PAIDOFF"],
],
}
)
# With a PostgreSQL database
payment_connector = PostgreSQLConnector(
config={
"host": "localhost",
"port": 5432,
"database": "mydb",
"username": "root",
"password": "root",
"table": "payments",
"where": [
# this is optional and filters the data to
# reduce the size of the dataframe
["payment_status", "=", "PAIDOFF"],
],
}
)
# With a Sqlite database
invoice_connector = SqliteConnector(
config={
"database": "local_path_to_db",
"table": "invoices",
"where": [["status", "=", "pending"]],
}
)
llm = OpenAI()
df = SmartDatalake(
[loan_connector, payment_connector, invoice_connector], config={"llm": llm}
)
response = df.chat("How many people from the United states?")
print(response)
# Output: the number of people from the United States in the connected tables.
| [
"How many people from the United states?"
] |
2024-01-10 | gventuri/pandas-ai | examples~with_multiple_dataframes.py | """Example of using PandasAI on multiple Pandas DataFrame"""
import pandas as pd
from pandasai import SmartDatalake
from pandasai.llm import OpenAI
employees_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
)
salaries_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
)
llm = OpenAI()
dl = SmartDatalake(
[employees_df, salaries_df],
config={"llm": llm, "verbose": True},
)
response = dl.chat("Plot salaries against name")
print(response)
# Output: <displays the plot>
| [
"Plot salaries against name"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_google_sheets.py | """Example of using PandasAI with am Excel file."""
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
# Betas & Bludgers Writing Competitions List (source: https://heystacks.com/?type=sheets&tags=data)
google_sheets_url = "https://docs.google.com/spreadsheets/d/1VKkhugv2eF87AoOm4OXjI0sQEHrNhxy6gPL3F7xyw7g/edit#gid=115719017" # noqa E501
llm = OpenAI()
df = SmartDataframe(google_sheets_url, config={"llm": llm})
response = df.chat("How many short stories are there?")
print(response)
# Output: 35
| [
"How many short stories are there?"
] |
2024-01-10 | gventuri/pandas-ai | examples~using_pandasai_log_server.py | import os
import pandas as pd
from pandasai import Agent
from pandasai.llm.openai import OpenAI
employees_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
salaries_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
employees_df = pd.DataFrame(employees_data)
salaries_df = pd.DataFrame(salaries_data)
# Example 1: Using Environment Variables
os.environ["LOGGING_SERVER_URL"] = "SERVER_URL"
os.environ["LOGGING_SERVER_API_KEY"] = "YOUR_API_KEY"
llm = OpenAI("YOUR_API_KEY")
agent = Agent(
[employees_df, salaries_df],
config={
"llm": llm,
"enable_cache": True,
},
memory_size=10,
)
# Chat with the agent
response = agent.chat("Plot salary against department?")
print(response)
# Example 2: Using Config
llm = OpenAI("YOUR_API_KEY")
agent = Agent(
[employees_df, salaries_df],
config={
"llm": llm,
"enable_cache": True,
"log_server": {
"server_url": "SERVER_URL",
"api_key": "YOUR_API_KEY",
},
},
memory_size=10,
)
# Chat with the agent
response = agent.chat("Plot salary against department?")
print(response)
| [
"Plot salary against department?"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_yahoo_finance.py | from pandasai.connectors.yahoo_finance import YahooFinanceConnector
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
yahoo_connector = YahooFinanceConnector("MSFT")
llm = OpenAI(api_token="OPEN_API_KEY")
df = SmartDataframe(yahoo_connector, config={"llm": llm})
response = df.chat("What is the closing price for yesterday?")
print(response)
| [
"What is the closing price for yesterday?"
] |
2024-01-10 | gventuri/pandas-ai | tests~llms~test_langchain_llm.py | """Unit tests for the base LLM class"""
from langchain.llms import OpenAI
import pytest
from pandasai.llm import LangchainLLM
from pandasai.prompts import AbstractPrompt
from unittest.mock import Mock
class TestLangchainLLM:
"""Unit tests for the LangChain wrapper LLM class"""
@pytest.fixture
def langchain_llm(self):
class FakeOpenAI(OpenAI):
openai_api_key = "fake_key"
def __call__(self, _prompt, stop=None, callbacks=None, **kwargs):
return Mock(return_value="Custom response")()
return FakeOpenAI()
@pytest.fixture
def prompt(self):
class MockAbstractPrompt(AbstractPrompt):
template: str = "Hello"
return MockAbstractPrompt()
def test_langchain_llm_type(self, langchain_llm):
langchain_wrapper = LangchainLLM(langchain_llm)
assert langchain_wrapper.type == "langchain_openai"
def test_langchain_model_call(self, langchain_llm, prompt):
langchain_wrapper = LangchainLLM(langchain_llm)
assert (
langchain_wrapper.call(instruction=prompt, suffix="!") == "Custom response"
)
| [
"Hello"
] |
2024-01-10 | gventuri/pandas-ai | examples~with_privacy_enforced.py | """Example of using PandasAI with a Pandas DataFrame"""
import pandas as pd
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from .data.sample_dataframe import dataframe
llm = OpenAI()
df = SmartDataframe(
df=pd.DataFrame(dataframe), config={"llm": llm, "enforce_privacy": True}
)
response = df.chat("Calculate the sum of the gdp of north american countries")
print(response)
# Output: 20901884461056
| [
"Calculate the sum of the gdp of north american countries"
] |
2024-01-10 | gventuri/pandas-ai | examples~save_chart.py | """Example of using PandasAI to generate and save a chart from a Pandas DataFrame"""
import pandas as pd
import os
from data.sample_dataframe import dataframe
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from pandasai.helpers import path
df = pd.DataFrame(dataframe)
llm = OpenAI()
try:
user_defined_path = path.find_project_root()
except ValueError:
user_defined_path = os.getcwd()
user_defined_path = os.path.join(user_defined_path, "exports", "charts")
df = SmartDataframe(
df,
config={
"llm": llm,
"save_charts_path": user_defined_path,
"save_charts": True,
"verbose": True,
},
)
response = df.chat(
"Plot the histogram of countries showing for each the gpd,"
" using different colors for each bar",
)
# Output: check out $pwd/exports/charts/{hashid}/chart.png
| [
"Plot the histogram of countries showing for each the gpd, using different colors for each bar"
] |
2024-01-10 | gventuri/pandas-ai | examples~with_name_and_description.py | """Example of using PandasAI with a Pandas DataFrame"""
import pandas as pd
from data.sample_dataframe import dataframe
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
df = pd.DataFrame(dataframe)
llm = OpenAI()
df = SmartDataframe(
df=pd.DataFrame(dataframe),
name="Countries",
description="A dataframe with countries with their GDPs and happiness scores",
config={"llm": llm},
)
response = df.chat("Calculate the sum of the gdp of north american countries")
print(response)
print(df.last_prompt)
# Output: 20901884461056
| [
"Calculate the sum of the gdp of north american countries"
] |
2024-01-10 | gventuri/pandas-ai | examples~from_snowflake.py | """Example of using PandasAI with a Snowflake"""
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from pandasai.connectors import SnowFlakeConnector
snowflake_connector = SnowFlakeConnector(
config={
"account": "ehxzojy-ue47135",
"database": "SNOWFLAKE_SAMPLE_DATA",
"username": "test",
"password": "*****",
"table": "lineitem",
"warehouse": "COMPUTE_WH",
"dbSchema": "tpch_sf1",
"where": [
# this is optional and filters the data to
# reduce the size of the dataframe
["l_quantity", ">", "49"]
],
}
)
llm = OpenAI(api_token="OPEN_API_KEY")
df = SmartDataframe(snowflake_connector, config={"llm": llm})
response = df.chat("How many records has status 'F'?")
print(response)
| [
"How many records has status 'F'?"
] |
2024-01-10 | gventuri/pandas-ai | pandasai~llm~azure_openai.py | """OpenAI LLM via Microsoft Azure Cloud
This module is to run the OpenAI API when using Microsoft Cloud infrastructure.
Azure has implemented the openai API access to its platform.
For details https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference.
Example:
    Use the example below to call the AzureOpenAI class
>>> from pandasai.llm.azure_openai import AzureOpenAI
"""
import os
from typing import Any, Dict, Optional, Union, Callable
import openai
from ..helpers import load_dotenv
from ..exceptions import APIKeyNotFoundError, MissingModelError
from ..helpers.openai import is_openai_v1
from .base import BaseOpenAI
load_dotenv()
class AzureOpenAI(BaseOpenAI):
"""OpenAI LLM via Microsoft Azure
This class uses `BaseOpenAI` class to support Azure OpenAI features.
"""
    azure_endpoint: Union[str, None] = None
    """Your Azure OpenAI resource endpoint, e.g.
    <https://YOUR_RESOURCE_NAME.openai.azure.com/>.
    Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
    """
    azure_ad_token: Union[str, None] = None
    """Your Azure Active Directory token.
    Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
    For more:
    https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
    """
    azure_ad_token_provider: Union[Callable[[], str], None] = None
    """A function that returns an Azure Active Directory token.
    Will be invoked on every request.
    """
deployment_name: str
api_version: str = ""
"""Legacy, for openai<1.0.0 support."""
api_base: str
"""Legacy, for openai<1.0.0 support."""
api_type: str = "azure"
def __init__(
self,
api_token: Optional[str] = None,
azure_endpoint: Union[str, None] = None,
azure_ad_token: Union[str, None] = None,
azure_ad_token_provider: Union[Callable[[], str], None] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
deployment_name: str = None,
is_chat_model: bool = True,
**kwargs,
):
"""
__init__ method of AzureOpenAI Class.
Args:
api_token (str): Azure OpenAI API token.
azure_endpoint (str): Azure endpoint.
It should look like the following:
<https://YOUR_RESOURCE_NAME.openai.azure.com/>
azure_ad_token (str): Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
azure_ad_token_provider (str): A function that returns an Azure Active Directory token.
Will be invoked on every request.
api_version (str): Version of the Azure OpenAI API.
Be aware the API version may change.
api_base (str): Legacy, kept for backward compatibility with openai < 1.0.
Ignored for openai >= 1.0.
deployment_name (str): Custom name of the deployed model
is_chat_model (bool): Whether ``deployment_name`` corresponds to a Chat
or a Completion model.
**kwargs: Inference Parameters.
"""
self.api_token = (
api_token
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
self.azure_endpoint = azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT")
self.api_base = api_base or os.getenv("OPENAI_API_BASE")
self.api_version = api_version or os.getenv("OPENAI_API_VERSION")
if self.api_token is None:
raise APIKeyNotFoundError(
"Azure OpenAI key is required. Please add an environment variable "
"`AZURE_OPENAI_API_KEY` or `OPENAI_API_KEY` or pass `api_token` as a named parameter"
)
if is_openai_v1():
if self.azure_endpoint is None:
                raise APIKeyNotFoundError(
                    "Azure endpoint is required. Please add an environment variable "
                    "`AZURE_OPENAI_ENDPOINT` or pass `azure_endpoint` as a named parameter"
)
elif self.api_base is None:
raise APIKeyNotFoundError(
"Azure OpenAI base is required. Please add an environment variable "
"`OPENAI_API_BASE` or pass `api_base` as a named parameter"
)
if self.api_version is None:
raise APIKeyNotFoundError(
"Azure OpenAI version is required. Please add an environment variable "
"`OPENAI_API_VERSION` or pass `api_version` as a named parameter"
)
if deployment_name is None:
raise MissingModelError(
"No deployment name provided.",
"Please include deployment name from Azure dashboard.",
)
self.azure_ad_token = azure_ad_token or os.getenv("AZURE_OPENAI_AD_TOKEN")
self.azure_ad_token_provider = azure_ad_token_provider
self._is_chat_model = is_chat_model
self.deployment_name = deployment_name
self.openai_proxy = kwargs.get("openai_proxy") or os.getenv("OPENAI_PROXY")
if self.openai_proxy:
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}
self._set_params(**kwargs)
# set the client
if self._is_chat_model:
self.client = (
openai.AzureOpenAI(**self._client_params).chat.completions
if is_openai_v1()
else openai.ChatCompletion
)
else:
self.client = (
openai.AzureOpenAI(**self._client_params).completions
if is_openai_v1()
else openai.Completion
)
@property
def _default_params(self) -> Dict[str, Any]:
"""
Get the default parameters for calling OpenAI API.
Returns:
dict: A dictionary containing Default Params.
"""
return {
**super()._default_params,
"model" if is_openai_v1() else "engine": self.deployment_name,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
if is_openai_v1():
return super()._invocation_params
else:
return {
**super()._invocation_params,
"api_type": self.api_type,
"api_version": self.api_version,
}
@property
def _client_params(self) -> Dict[str, any]:
client_params = {
"api_version": self.api_version,
"azure_endpoint": self.azure_endpoint,
"azure_deployment": self.deployment_name,
"azure_ad_token": self.azure_ad_token,
"azure_ad_token_provider": self.azure_ad_token_provider,
}
return {**client_params, **super()._client_params}
@property
def type(self) -> str:
return "azure-openai"
| [] |
2024-01-10 | gventuri/pandas-ai | examples~from_databricks.py | """Example of using PandasAI with a DataBricks"""
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from pandasai.connectors import DatabricksConnector
databricks_connector = DatabricksConnector(
config={
"host": "adb-*****.azuredatabricks.net",
"database": "default",
"token": "dapidfd412321",
"port": 443,
"table": "loan_payments_data",
"httpPath": "/sql/1.0/warehouses/213421312",
"where": [
# this is optional and filters the data to
# reduce the size of the dataframe
["loan_status", "=", "PAIDOFF"],
],
}
)
llm = OpenAI("OPEN_API_KEY")
df = SmartDataframe(databricks_connector, config={"llm": llm})
response = df.chat("How many people from the United states?")
print(response)
| [
"How many people from the United states?"
] |
2024-01-10 | gventuri/pandas-ai | examples~skills_example.py | import pandas as pd
from pandasai import Agent
from pandasai.llm.openai import OpenAI
from pandasai.skills import skill
employees_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
salaries_data = {
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
employees_df = pd.DataFrame(employees_data)
salaries_df = pd.DataFrame(salaries_data)
# Add function docstring to give more context to model
@skill
def plot_salaries(names: list[str], salaries: list[int]):
"""
Displays the bar chart having name on x-axis and salaries on y-axis using matplotlib
Args:
names (list[str]): Employees' names
salaries (list[int]): Salaries
"""
import matplotlib.pyplot as plt
plt.bar(names, salaries)
plt.xlabel("Employee Name")
plt.ylabel("Salary")
plt.title("Employee Salaries")
plt.xticks(rotation=45)
llm = OpenAI("YOUR-API-KEY")
agent = Agent([employees_df, salaries_df], config={"llm": llm}, memory_size=10)
agent.add_skills(plot_salaries)
# Chat with the agent
response = agent.chat("Plot the employee salaries against names")
print(response)
| [
"Plot the employee salaries against names"
] |
2024-01-10 | gventuri/pandas-ai | examples~using_pipeline.py | import pandas as pd
from pandasai.llm.openai import OpenAI
from pandasai.pipelines.logic_units.output_logic_unit import ProcessOutput
from pandasai.pipelines.synthetic_dataframe.generate_sdf_pipeline import (
GenerateSDFPipeline,
)
from pandasai.pipelines.pipeline_context import PipelineContext
from pandasai.pipelines.synthetic_dataframe.sdf_code_executor import (
SDFCodeExecutor,
)
from pandasai.pipelines.synthetic_dataframe.synthetic_df_prompt import (
SyntheticDataframePrompt,
)
from pandasai.pipelines.logic_units.prompt_execution import PromptExecution
from pandasai.pipelines.pipeline import Pipeline
employees_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Name": ["John", "Emma", "Liam", "Olivia", "William"],
"Department": ["HR", "Sales", "IT", "Marketing", "Finance"],
}
)
salaries_df = pd.DataFrame(
{
"EmployeeID": [1, 2, 3, 4, 5],
"Salary": [5000, 6000, 4500, 7000, 5500],
}
)
llm = OpenAI("Your-API-Key")
config = {"llm": llm, "verbose": True}
context = PipelineContext([salaries_df], config)
# Create your own pipeline
pipeline = Pipeline(
context=context,
steps=[
SyntheticDataframePrompt(amount=15),
PromptExecution(),
SDFCodeExecutor(),
ProcessOutput(),
],
)
data_frame = pipeline.run()
print(data_frame)
# Using defined Pipelines
context = PipelineContext([employees_df], config)
pipeline = GenerateSDFPipeline(
amount=10,
context=context,
)
data_frame = pipeline.run()
print(data_frame)
# Without passing Context
pipeline = Pipeline(
[salaries_df],
config=config,
steps=[
SyntheticDataframePrompt(amount=15),
PromptExecution(),
SDFCodeExecutor(),
ProcessOutput(),
],
)
data_frame = pipeline.run()
print(data_frame)
| [] |
2024-01-10 | sv2441/LLM-Hackathon | pages~2_CV%20ranking.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
import docx
import os
import pdfplumber
load_dotenv()
# os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
chat_llm = ChatOpenAI(temperature=0.0, request_timeout=120)
def extract_text_from_pdf(pdf_file):
with pdfplumber.open(pdf_file) as pdf:
text = ""
for page in pdf.pages:
text += page.extract_text()
return text
def summary(text):
title_template = """you are a HR Recruiter bot.you are given a text from resume.
Summarize the "{topic}" into 50 to 60 words including key skills and technology.
"""
prompt = ChatPromptTemplate.from_template(template=title_template)
messages = prompt.format_messages(topic=text)
response = chat_llm(messages)
return response.content
def read_docx(file_path):
try:
doc = docx.Document(file_path)
text = ""
for para in doc.paragraphs:
text += para.text + "\n"
return text
except Exception as e:
return str(e)
def cv_rank(topic):
file_path = "enhanced_jd.docx"
jd = read_docx(file_path)
title_template = """you are a HR Recruiter bot.
"{topic}" is Resume summary . Score the Summary based on "{jd}". Give the Rate out of 10.
"""
prompt = ChatPromptTemplate.from_template(template=title_template)
messages = prompt.format_messages(topic=topic,jd=jd)
response = chat_llm(messages)
return response.content
def save_as_docx(text, filename):
doc = docx.Document()
doc.add_paragraph(text)
doc.save(filename)
def main():
st.title("CV Ranking")
uploaded_files = st.file_uploader("Upload PDF Resumes", type=["pdf"], accept_multiple_files=True)
if uploaded_files:
st.write("Uploaded Resumes:")
for resume in uploaded_files:
st.write(resume.name)
text = extract_text_from_pdf(resume)
response=summary(text)
st.text(response)
# Display extracted text on the app
if st.button("Rank"):
rank=cv_rank(response)
st.text(rank)
if st.button("Save"):
save_as_docx(response, "summary.docx")
if __name__ == "__main__":
main()
| [
"you are a HR Recruiter bot.you are given a text from resume. \n Summarize the \"{topic}\" into 50 to 60 words including key skills and technology.\n ",
"you are a HR Recruiter bot.\n \"{topic}\" is Resume summary . Score the Summary based on \"{jd}\". Give the Rate out of 10.\n "
] |
2024-01-10 | sv2441/LLM-Hackathon | pages~1_Job%20Description%20evaluation.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
import docx
import os
load_dotenv()
# os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
chat_llm = ChatOpenAI(temperature=0.0, request_timeout=120)
def main():
st.title("Job Description Evaluation")
# Text input for the test
jd = st.text_input("Enter the Job Description:", value="")
# Display the test result
if st.button("Submit"):
if jd:
enhanced = enhanced_jd(jd)
st.write("User Input JD:", jd)
st.write("Enhanced JD:", enhanced)
if st.button("Old"):
save_as_docx(jd, "jd.docx")
if st.button("New"):
save_as_docx(enhanced, "enhanced_jd.docx")
def enhanced_jd(jd):
title_template = """you are a HR Recruiter bot. you are given a Job description.
This is "{topic}" and Score this Job description out of 10.
Make some necessary enhancements in the given Job description and only Provide the enhanced Version of the "{topic}".
"""
prompt = ChatPromptTemplate.from_template(template=title_template)
messages = prompt.format_messages(topic=jd)
response = chat_llm(messages)
save_as_docx(response.content, "enhanced_jd.docx")
return response.content
def save_as_docx(text, filename):
doc = docx.Document()
doc.add_paragraph(text)
doc.save(filename)
if __name__ == "__main__":
main()
| [
"you are a HR Recruiter bot. you are given a Job description. \n This is \"{topic}\" and Score this Job description out of 10. \n Make some necessary enhancements in the given Job description and only Provide the enhanced Version of the \"{topic}\".\n "
] |
2024-01-10 | sv2441/LLM-Hackathon | pages~3_Genrate_QA.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
import docx
import os
import pdfplumber
load_dotenv()
# os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
chat_llm = ChatOpenAI(temperature=0.0, request_timeout=120)
def read_docx(file_path):
try:
doc = docx.Document(file_path)
text = ""
for para in doc.paragraphs:
text += para.text + "\n"
return text
except Exception as e:
return str(e)
def genrate_qa(en_jd , summary):
title_template = """you are a Technical interviewer. Develop 15 screening questions for each candidate,
considering different levels of importance or significance assigned to the "{en_jd}" and the "{summary}"
"""
prompt = ChatPromptTemplate.from_template(template=title_template)
messages = prompt.format_messages(en_jd=en_jd,summary=summary)
response = chat_llm(messages)
return response.content
def main():
st.title("Screening Round")
Name = st.text_input("Enter the Name :", value="")
if st.button("Generate Questions"):
file_path = "enhanced_jd.docx"
en_jd = read_docx(file_path)
file_path = "summary.docx"
summary=read_docx(file_path)
questions=genrate_qa(en_jd,summary)
st.text(questions)
if st.button("Submit"):
        st.markdown("Thank You")
if __name__ == "__main__":
main()
| [
"you are a Technical interviewer. Develop 15 screening questions for each candidate,\n considering different levels of importance or significance assigned to the \"{en_jd}\" and the \"{summary}\"\n "
] |
2024-01-10 | HammadMusaddiq/Python_ML | Embeddings%20Multi%20model~Code~updated_code.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, roc_auc_score, matthews_corrcoef
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, BertTokenizer, BertForNextSentencePrediction
from transformers import RobertaForSequenceClassification, RobertaTokenizer
from transformers import DistilBertForSequenceClassification, DistilBertTokenizer
from transformers import CamembertForSequenceClassification, CamembertTokenizer
from transformers import AlbertForSequenceClassification, AlbertTokenizer
from transformers import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers.optimization import AdamW
#It works - BERT Model
bert_model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#It works - Distil BERT
distil_bert_model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
distil_bert_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
#It works- French Language Model
camem_bert_model = CamembertForSequenceClassification.from_pretrained('camembert-base')
camem_bert_tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
#It works - Roberta Tokenizer
roberta_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
roberta_model = RobertaForSequenceClassification.from_pretrained('roberta-base')
#It works for EPOCHS=30 - Albert Tokenizer
albert_model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')
albert_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
#It does not work
# tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
# model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
#It works- GPT2 Tokenizer
gpt_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
gpt_model = GPT2LMHeadModel.from_pretrained('gpt2')
# tokenizer.add_special_tokens({'cls_token': '[CLS]', 'pad_token': '[PAD]'})
# model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
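# Caveat: GPT-2 ships without a padding token, so padding only works once a
# [PAD] token has been added (done lazily inside the training loop below,
# mirroring the commented-out lines above). GPT2LMHeadModel also returns
# language-modelling logits rather than two-class logits; for classification
# scoring like the BERT-style models, GPT2ForSequenceClassification would be
# the more natural head.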
models = [bert_model, distil_bert_model, camem_bert_model, roberta_model, albert_model, gpt_model]
tokernizers = [bert_tokenizer, distil_bert_tokenizer, camem_bert_tokenizer, roberta_tokenizer, albert_tokenizer, gpt_tokenizer]
print("#####################################")
print("Models Loaded")
print("#####################################")
df = pd.read_excel('Health Related Policies-Labled 1000 Records.xlsx')
X_train = df['HealthPrivacyPolicy'][:700]
y_train = df['Label'][:700]
X_test = df['HealthPrivacyPolicy'][700:]
y_test = df['Label'][700:]
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
MAX_SEQ_LEN = 32
BATCH_SIZE = 4
NUM_EPOCHS = 1
no_decay = ['bias', 'gamma', 'beta']
count = 1
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def evaluate(model, test_inputs, token_types, test_masks, test_labels):
# Convert all of our data into torch tensors, the required datatype for our model
prediction_inputs = torch.tensor(test_inputs)
#prediction_token_types = torch.tensor(token_types)
prediction_masks = torch.tensor(test_masks)
    prediction_labels = torch.tensor(test_labels.values)  # use the labels passed in instead of re-reading the global df
# Select a batch size for training.
batch_size = BATCH_SIZE
# Create an iterator of our data with torch DataLoader
#prediction_data = TensorDataset(prediction_inputs, prediction_token_types, prediction_masks, prediction_labels)
prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
## Prediction on test set
# Put model in evaluation mode
model.eval()
# Tracking variables
predictions , true_labels = [], []
# Predict
for batch in prediction_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
#b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and speeding up prediction
with torch.no_grad():
# Forward pass, calculate logit predictions
# logits = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask)
logits = model(b_input_ids, attention_mask=b_input_mask)
# Move logits and labels to CPU
logits = logits[0].cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Store predictions and true labels
predictions.append(logits)
true_labels.append(label_ids)
# matthews_set = []
# for i in range(len(true_labels)):
# matthews = matthews_corrcoef(true_labels[i],
# np.argmax(predictions[i], axis=1).flatten())
# matthews_set.append(matthews)
# Flatten the predictions and true values for aggregate Matthew's evaluation on the whole dataset
flat_predictions = [item for sublist in predictions for item in sublist]
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
flat_true_labels = [item for sublist in true_labels for item in sublist]
scores = {
#'matthews_corrcoef_acc': matthews_corrcoef(flat_true_labels, flat_predictions),
'precision': precision_score(flat_true_labels, flat_predictions),
'recall': recall_score(flat_true_labels, flat_predictions),
'f1_score': f1_score(flat_true_labels, flat_predictions),
'accuracy': accuracy_score(flat_true_labels, flat_predictions)
}
return scores
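# Note: the precision/recall/F1 calls above use scikit-learn's default
# average='binary', so this evaluation assumes a two-class Label column;
# multi-class labels would need an explicit `average=` argument.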
for model, tokenizer in zip(models, tokernizers):
input_ids = []
attention_masks = []
input_ids_test = []
attention_masks_test = []
token_type_ids_test = []
for tweet in X_train:
if model != gpt_model:
#Only for BERT models
encoded_data = tokenizer.encode_plus(text=tweet, max_length=512, pad_to_max_length=True)
input_ids.append(encoded_data['input_ids'])
else:
#ONly FOR OpenAI-GPT and GPT2
tokenizer.add_special_tokens({'cls_token': '[CLS]', 'pad_token': '[PAD]'})
# model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
encoded_data = tokenizer.encode_plus(text=tweet, max_length=512, pad_to_max_length=True)
input_ids.append(encoded_data['input_ids'])
#token_type_ids.append(encoded_data['token_type_ids'])
attention_masks.append(encoded_data['attention_mask'])
# Use train_test_split to split our training data into train and validation sets for training
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(
input_ids, y_train, random_state=27, test_size=0.20)
#train_token_types, validation_token_types, _, _ = train_test_split(token_type_ids, input_ids,
# random_state=RANDOM_SEED, test_size=0.20)
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids, random_state=27, test_size=0.20)
print("Train Data: ", len(train_inputs), len(train_masks), len(train_labels))
print("Test Data: ", len(validation_inputs), len(validation_masks, len(validation_labels)))
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
#train_token_types = torch.tensor(train_token_types)
#validation_token_types = torch.tensor(validation_token_types)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
    # Use the label splits returned by train_test_split so they stay aligned with the shuffled inputs
    train_labels = torch.tensor(train_labels.values)
    validation_labels = torch.tensor(validation_labels.values)
# Select a batch size for training.
batch_size = BATCH_SIZE
# Create an iterator of our data with torch DataLoader
#train_data = TensorDataset(train_inputs, train_token_types, train_masks, train_labels)
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
#validation_data = TensorDataset(validation_inputs, validation_token_types, validation_masks, validation_labels)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
torch.cuda.empty_cache()
# specify GPU device
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
#print(device)
n_gpu = torch.cuda.device_count()
#print(n_gpu)
# torch.cuda.get_device_name(0)
# BERT fine-tuning parameters
param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    # The per-group key must be 'weight_decay' (not 'weight_decay_rate') for the
    # no-decay groups to take effect, so the global default is omitted here.
    optimizer = AdamW(params=optimizer_grouped_parameters, lr=2e-5)
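    # Optional (not in the original script): AdamW is commonly paired with a
    # linear warmup/decay schedule when fine-tuning transformers. A minimal
    # sketch, assuming the standard `transformers` scheduler API:
    # from transformers import get_linear_schedule_with_warmup
    # scheduler = get_linear_schedule_with_warmup(
    #     optimizer,
    #     num_warmup_steps=0,
    #     num_training_steps=len(train_dataloader) * NUM_EPOCHS,
    # )
    # If enabled, call scheduler.step() right after optimizer.step() in the training loop.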
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs
epochs = NUM_EPOCHS
# model.cuda()
model.to(device)
# BERT training loop
for epoch in range(epochs):
print('Epoch {}/{}'.format(epoch+1, epochs))
## TRAINING
# Set our model to training mode
model.train()
# Tracking variables
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
# Train the data for one epoch
for step, batch in enumerate(train_dataloader):
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
#b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch
b_input_ids, b_input_mask, b_labels = batch
# Clear out the gradients (by default they accumulate)
optimizer.zero_grad()
# Forward pass
#loss, _ = model(input_ids = b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, labels=b_labels)
#loss, _ = model(input_ids = b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, next_sentence_label=b_labels)
loss, _ = model(input_ids = b_input_ids, attention_mask=b_input_mask, labels=b_labels, return_dict=False)
train_loss_set.append(loss.item())
# Backward pass
loss.backward()
# Update parameters and take a step using the computed gradient
optimizer.step()
# Update tracking variables
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
print("Train loss: {:.4f}".format(tr_loss/nb_tr_steps))
## VALIDATION
# Put model in evaluation mode
model.eval()
# Tracking variables
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
#b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions
#logits = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask)
logits = model(b_input_ids, attention_mask=b_input_mask)
# Move logits and labels to CPU
logits = np.array(logits[0].cpu())
label_ids = b_labels.to('cpu').numpy()
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
print("Validation Accuracy: {:.4f}\n".format(eval_accuracy/nb_eval_steps))
# plot training performance
plt.figure(figsize=(15,8))
#plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Training Loss")
plt.plot(train_loss_set)
# plt.show()
    plt.savefig("model_" + str(count) + '.jpg')
for tweet in X_test:
encoded_data_test = tokenizer.encode_plus(text=tweet, max_length=MAX_SEQ_LEN, pad_to_max_length=True)
input_ids_test.append(encoded_data_test['input_ids'])
#token_type_ids_test.append(encoded_data_test['token_type_ids'])
attention_masks_test.append(encoded_data_test['attention_mask'])
scores = evaluate(model, input_ids_test, token_type_ids_test, attention_masks_test, y_test)
print('{:.4f}, {:.4f}, {:.4f}, {:.4f}'.format(scores['accuracy'], scores['precision'], scores['recall'], scores['f1_score']))
print("######################################")
print("Model_" + str(count) + ":Done")
print("######################################")
count += 1
| [] |
2024-01-10 | LLMLab/auto_anno | auto_anno_2~utils~auto_learn~cluster_text.py | import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
import numpy as np
import random
import sys
sys.path.append('.')
from ...local_config import emb, config
openai_key = config['openai']['key']
EMBEDDING_BY = 'yiyan'  # 'openai' | 'bert' | anything else falls back to the local emb() helper
if EMBEDDING_BY == 'openai':
import openai
elif EMBEDDING_BY == 'bert':
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
model = AutoModelForMaskedLM.from_pretrained("bert-base-chinese")
import torch
def get_embedding(text, by=EMBEDDING_BY):
if by == 'openai':
# Set OpenAI API key
openai.api_key = random.choice(openai_key) if type(openai_key) == list else openai_key
model = "text-embedding-ada-002"
emb_req = openai.Embedding.create(input=[text], model=model)
embedding = emb_req.data[0].embedding
return embedding
elif by == 'bert':
encoded_input = tokenizer(text, padding=True, truncation=True, return_tensors='pt')
output = model(**encoded_input)
embedding = torch.mean(output[0], dim=1).squeeze(0)
return embedding.detach().numpy()
else:
return emb(text)
return None
def cluster_text(text_list, n_clusters=20):
if n_clusters >= len(text_list):
return text_list
# Convert text_list to numerical data
data = []
for text in text_list:
embedding = get_embedding(text, by=EMBEDDING_BY)
data.append(embedding)
data = np.array(data)
# Cluster the data
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(data)
# Get the cluster centers
centers = kmeans.cluster_centers_
# Get the distances to each center
# distances = kmeans.transform(data)
distances = euclidean_distances(data, centers)
    # For each cluster center, find the index of the closest sample
indexes = np.argmin(distances, axis=0)
# Get the samples with the smallest distance to their center
samples = [text_list[idx] for idx in indexes]
return samples
def plot_clusters(text_list, n_clusters=20, openai_api_key=openai_key):
# Set OpenAI API key
openai.api_key = openai_api_key
model = "text-embedding-ada-002"
# Convert text_list to numerical data using OpenAI API
data = []
for text in text_list:
emb_req = openai.Embedding.create(input=[text], model=model)
embeddings = emb_req.data[0].embedding
data.append(embeddings)
data = np.array(data)
# Cluster the data
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(data)
# Reduce the dimensionality of the data
pca = PCA(n_components=2)
reduced_data = pca.fit_transform(data)
# Plot the reduced data
plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c=kmeans.labels_)
for i, text in enumerate(text_list):
plt.annotate(text, (reduced_data[i, 0], reduced_data[i, 1]))
plt.show()
if __name__ == "__main__":
test_data = [
'一百多和三十的也看不出什么区别,包装精美,质量应该不错。',
'质量很好 料子很不错 做工细致 样式好看 穿着很漂亮',
' 会卷的 建议买大的小的会卷 胖就别买了 没用',
'大差了 布料很差 我也不想多说',
'一点也不好,我买的东西拿都拿到快递员自己签收了还不给我,恶心恶心恶心,不要脸不要脸',
'一百多和三十的也看不出什么区别,包装精美,质量应该不错。',
'质量很好 料子很不错 做工细致 样式好看 穿着很漂亮',
' 会卷的 建议买大的小的会卷 胖就别买了 没用',
'大差了 布料很差 我也不想多说',
'一点也不好,我买的东西拿都拿到快递员自己签收了还不给我,恶心恶心恶心,不要脸不要脸',
]
result = cluster_text(test_data, n_clusters=3)
# plot_clusters(test_data, n_clusters=3)
print(result)
| [] |
2024-01-10 | kaueltzen/pymatgen | pymatgen~analysis~interface.py | """
This module provides classes to store, generate, and manipulate material interfaces.
"""
from __future__ import annotations
import warnings
from pymatgen.analysis.interfaces import CoherentInterfaceBuilder # noqa: F401
from pymatgen.core.interface import Interface # noqa: F401
__author__ = "Eric Sivonxay, Shyam Dwaraknath, and Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "[email protected]"
__date__ = "5/29/2019"
__status__ = "Prototype"
warnings.warn(
"The substrate_analyzer module is being moved to the interfaces submodule in analysis."
" These imports will break in Pymatgen 2023",
category=FutureWarning,
stacklevel=2,
)
| [] |
2024-01-10 | dkalpakchi/SweCTRL-Mini | human_eval~synthesize_gpt3.py | import os
import json
import time
import dotenv
import yaml
import openai
import requests
import jsonlines as jsl
from tqdm import tqdm
prompt_templates = {
"news": "Skriv en nyhetsartikel.\\{}",
"wiki": "Skriv en artikel i Wikipedia.\\{}",
"news_sport": "Skriv en nyhetsartikel om idrott.\\{}",
"blogs": "Skriv ett blogginlägg.\\{}",
"news_pressrelease": "Skriv ett pressmeddelande.\\{}",
"ads": "Skriv en annons.\\{}",
"news_opinion": "Skriv en insändare.\\{}",
"news_culture": "Skriv en nyhetsartikel om kultur.\\{}",
"admin": "Skriv en förvaltningstext.\\{}",
"news_economy": "Skriv en nyhetsartikel om ekonomi.\\{}",
"info_medical": "Skriv en informerande text om ett medicinskt ämne.\\{}",
"info": "Skriv en informerande text.\\{}",
"news_tech": "Skriv en nyhetsartikel om teknologi.\\{}",
"review": "Skriv en recension.\\{}",
"info_travel": "Skriv en informerande text om resor.\\{}",
"news_lifestyle": "Skriv en nyhetsartikel om livstil.\\{}",
"blogs_sport": "Skriv ett blogginlägg om idrott.\\{}",
"info_lifestyle": "Skriv en informerande text om livstil.\\{}",
"news_sustainability": "Skriv en nyhetsartikel om hållbarhet.\\{}",
"news_travel": "Skriv en nyhetsartikel om resor.\\{}",
"info_business": "Skriv en informerande text om affär.\\{}",
"news_politics": "Skriv en nyhetsartikel om politik.\\{}",
"news_science": "Skriv en nyhetsartikel om vetenskap.\\{}",
"news_food": "Skriv en nyhetsartikel om mat.\\{}",
"news_fashion": "Skriv en nyhetsartikel om mode.\\{}",
"news_weather": "Skriv en nyhetsartikel om vädret.\\{}",
"blogs_economy": "Skriv ett blogginlägg om ekonomi.\\{}"
}
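# Example of how a template is combined with a user prompt further down (the
# prompt text here is invented purely for illustration):
#   prompt_templates["news"].format("Stockholm växer snabbare än väntat.")
#   -> 'Skriv en nyhetsartikel.\Stockholm växer snabbare än väntat.'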
if __name__ == '__main__':
dotenv.load_dotenv()
openai.api_key = os.getenv('SECRET_KEY')
with open("prompts.yaml") as f:
        prompts = yaml.safe_load(f)  # safe_load avoids the missing-Loader warning/error in newer PyYAML
generated = []
ts = int(time.time())
max_samples = 5
with jsl.open('generated_{}.jsonl'.format(ts), 'w') as writer:
for cat in tqdm(prompts):
for subcat, prompt_lst in prompts[cat].items():
for prompt in prompt_lst:
text = prompt_templates[cat].format(prompt)
num_samples = 0
while True:
if num_samples == max_samples:
break
try:
gen_params = {
'prompt': text,
'temperature': 0.7,
'max_tokens': 256
}
completion = openai.Completion.create(engine='text-davinci-003', **gen_params)
num_samples += 1
except openai.error.RateLimitError:
time.sleep(60)
continue
writer.write({
'text': text,
'cat': cat,
'subcat': subcat,
'params': gen_params,
'res': completion
})
| [
"{'news': 'Skriv en nyhetsartikel.\\\\{}', 'wiki': 'Skriv en artikel i Wikipedia.\\\\{}', 'news_sport': 'Skriv en nyhetsartikel om idrott.\\\\{}', 'blogs': 'Skriv ett blogginlägg.\\\\{}', 'news_pressrelease': 'Skriv ett pressmeddelande.\\\\{}', 'ads': 'Skriv en annons.\\\\{}', 'news_opinion': 'Skriv en insändare.\\\\{}', 'news_culture': 'Skriv en nyhetsartikel om kultur.\\\\{}', 'admin': 'Skriv en förvaltningstext.\\\\{}', 'news_economy': 'Skriv en nyhetsartikel om ekonomi.\\\\{}', 'info_medical': 'Skriv en informerande text om ett medicinskt ämne.\\\\{}', 'info': 'Skriv en informerande text.\\\\{}', 'news_tech': 'Skriv en nyhetsartikel om teknologi.\\\\{}', 'review': 'Skriv en recension.\\\\{}', 'info_travel': 'Skriv en informerande text om resor.\\\\{}', 'news_lifestyle': 'Skriv en nyhetsartikel om livstil.\\\\{}', 'blogs_sport': 'Skriv ett blogginlägg om idrott.\\\\{}', 'info_lifestyle': 'Skriv en informerande text om livstil.\\\\{}', 'news_sustainability': 'Skriv en nyhetsartikel om hållbarhet.\\\\{}', 'news_travel': 'Skriv en nyhetsartikel om resor.\\\\{}', 'info_business': 'Skriv en informerande text om affär.\\\\{}', 'news_politics': 'Skriv en nyhetsartikel om politik.\\\\{}', 'news_science': 'Skriv en nyhetsartikel om vetenskap.\\\\{}', 'news_food': 'Skriv en nyhetsartikel om mat.\\\\{}', 'news_fashion': 'Skriv en nyhetsartikel om mode.\\\\{}', 'news_weather': 'Skriv en nyhetsartikel om vädret.\\\\{}', 'blogs_economy': 'Skriv ett blogginlägg om ekonomi.\\\\{}'}"
] |