date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | chethanreddy123/SalesKRA-Query-engine | BackEndRoutes~RobustChat.py | from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import (ConversationBufferMemory,
ConversationSummaryMemory,
ConversationBufferWindowMemory,
ConversationKGMemory)
from langchain.callbacks import get_openai_callback
from langchain.llms import GooglePalm
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
import json
from pymongo.mongo_client import MongoClient
from fastapi.middleware.cors import CORSMiddleware
import logging
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
# The maximum length of the response
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
MongoDB_Key = "mongodb://aioverflow:[email protected]:27017,ac-pu6wews-shard-00-01.me4dkct.mongodb.net:27017,ac-pu6wews-shard-00-02.me4dkct.mongodb.net:27017/?ssl=true&replicaSet=atlas-jcoztp-shard-0&authSource=admin&retryWrites=true&w=majority"
Data = MongoClient(MongoDB_Key)
EmployeeData = Data['FinalAxisBankHackathon']['EmployeeData']
KRAsData = Data['FinalAxisBankHackathon']['KRAsData']
def count_tokens(chain, query):
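    # Note: get_openai_callback only tracks OpenAI API calls, so with GooglePalm as the LLM the
    # reported total_tokens will stay at 0; the callback is harmless here but does not measure PaLM usage.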
with get_openai_callback() as cb:
result = chain.run(query)
print(f'Spent a total of {cb.total_tokens} tokens')
return result
def load_json_file(file_path):
with open(file_path, 'r') as json_file:
json_string = json_file.read()
return json_string
documents = EmployeeData.find()
# Convert documents to JSON strings
json_string = [json.dumps(doc, default=str) for doc in documents]
# Initialize your LLM chain and conversation chain
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
conversation_kg = ConversationChain(
llm=llm,
memory=ConversationKGMemory(llm=llm)
)
# Initialize conversation_kg with the initial message
initial_message = f'''Good morning AI! You are an Expert
in Sales Key Result Areas (KRA) Setting and Performance Management.
You are here to help me with my queries regarding Sales Key Result Areas
(KRA) Setting and Performance Management, And all the sales employee data is
given below for future analysis: {json_string}'''
logging.info(initial_message)
check = conversation_kg(initial_message)
# Initialize FastAPI app
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Define request and response models (if needed)
class ChatRequest(BaseModel):
query: str
class ChatResponse(BaseModel):
response: str
# Define API endpoint for chat
@app.post("/chat/")
async def chat(request: Request):
json_data = await request.json()
query = json_data.get("query")
try:
response = count_tokens(conversation_kg, query)
return {"response": response}
except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
# Define a route for initial setup (if needed)
@app.get("/")
def initial_setup():
return {"message": "Server is up and running!"}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
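# Illustrative only (not part of the original file): a minimal client-side helper showing how the
# /chat/ endpoint above could be called once the server is running. The host, port, and sample
# query are assumptions for demonstration purposes.
def _example_chat_client(query: str = "Summarize the KRAs of employee AXIS123456") -> str:
    import requests  # local import so the server module does not require requests at import time
    resp = requests.post("http://localhost:8000/chat/", json={"query": query})
    resp.raise_for_status()
    return resp.json()["response"]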
| [] |
2024-01-10 | chethanreddy123/SalesKRA-Query-engine | Notebooks~MainRoutes.py | import pandas as pd
import os
from fastapi import HTTPException, FastAPI
import os
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
from loguru import logger
import matplotlib.pyplot as plt
import seaborn as sns
from fastapi.responses import FileResponse
from langchain.chains import LLMChain, ConversationChain
from langchain.chains.conversation.memory import (ConversationBufferMemory,
ConversationSummaryMemory,
ConversationBufferWindowMemory,
ConversationKGMemory)
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI
from sqlalchemy import create_engine, MetaData
from langchain.llms import GooglePalm
from langchain.callbacks import get_openai_callback
from sqlalchemy import create_engine, MetaData
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.agents.agent_types import AgentType
from sqlalchemy import create_engine, MetaData
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.agents.agent_types import AgentType
from fastapi.responses import HTMLResponse
def count_tokens(agent, query):
with get_openai_callback() as cb:
result = agent(query)
print(f'Spent a total of {cb.total_tokens} tokens')
return result
# Load Customers.csv
Customers = pd.read_csv('Customers.csv')
# Load Customers_Employees.csv
Customers_Employees = pd.read_csv('Customers_Employees.csv')
# Load Product_Holding.csv
Product_Holding = pd.read_csv('Product_Holding.csv')
# Load RM_KRAs.csv
RM_KRAs = pd.read_csv('RM_KRAs.csv')
# Load contacthistory.csv
contacthistory = pd.read_csv('contacthistory.csv')
# Load Persona.csv
Persona = pd.read_csv('Persona.csv')
# Load Employees.csv
Employees = pd.read_csv('Employees.csv')
def load_csv_to_dataframes():
dataframes = {}
csv_files = [
'Customers.csv',
'Customers_Employees.csv',
'Product_Holding.csv',
'RM_KRAs.csv',
'contacthistory.csv',
'Persona.csv',
'Employees.csv'
]
for i, csv_file in enumerate(csv_files, 1):
if os.path.isfile(csv_file):
dataframe_name = f'df{i}'
dataframes[dataframe_name] = pd.read_csv(csv_file)
print(f"Loaded '{csv_file}' as '{dataframe_name}' dataframe.")
else:
print(f"File '{csv_file}' does not exist.")
return dataframes
dataframes = load_csv_to_dataframes()
df = list(dataframes.values())
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
agent_pandas = create_pandas_dataframe_agent(llm,
df, verbose=True, )
# Create engines for both databases
engine = create_engine("sqlite:///data_KRA.sqlite")
db = SQLDatabase(engine)
sql_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)
agent_sql = create_sql_agent(
llm=llm,
toolkit=SQLDatabaseToolkit(db=db, llm=llm),
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
max_iterations=5,
handle_parsing_errors=True
)
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.post("/query_pandas/")
def run_query(query: dict):
if "query" not in query:
raise HTTPException(status_code=400, detail="Query missing in request")
    result = agent_pandas.run(query["query"])  # .run() returns the final answer as a string
    return {"query": query["query"], "result": result}
@app.post("/query_sql/")
def query_sql(query: dict):
if "query" not in query:
raise HTTPException(status_code=400, detail="Query missing in request")
result = count_tokens(agent_sql, query["query"])
return {"query": query["query"], "result": result['output']}
# @app.get("/html/")
# def send_html():
# return HTMLResponse(content=html_content, status_code=200)
#################### Objective - 3 ##########################
#################### Objective - 1 ##########################
conversation_buf_report = ConversationChain(
llm=llm,
memory=ConversationBufferMemory()
)
# Initialize conversation_buf with the initial message
initial_message_report = f'''Act like an expert Data Analyst and tell
whether the query is related to a theoretical or graphical question.
Output should be a string: either "theoretical" or "graphical"'''
check_report = conversation_buf_report(initial_message_report)
@app.post("/query_report/")
def query_report(query: dict):
if "query" not in query:
raise HTTPException(status_code=400, detail="Query missing in request")
result = count_tokens(conversation_buf_report, query["query"])
logger.info(result['response'])
# Run the agent over multiple dataframe
agent = create_pandas_dataframe_agent(llm , [Customers, Customers_Employees, Product_Holding ,
RM_KRAs , contacthistory , Persona , Employees], verbose=True,
return_intermediate_steps = True, max_iterations=5
)
ListOfCharts = ['bar', 'line', 'histogram' , 'pie' , 'scatter' , 'boxplot' , 'violinplot']
    for plot in ListOfCharts:
        try:
            agent(f"make {plot} chart using seaborn and save the graph as 'sample.png' , Query:{query['query']}, Please do go through all the tables for the analysis")
            # Serve the chart that the agent saved to disk
            return FileResponse('sample.png')
        except Exception as e:
            continue
    raise HTTPException(status_code=500, detail="Unable to generate a chart for the given query")
@app.post("/query_for_table/")
def query_report(query: dict):
if "query" not in query:
raise HTTPException(status_code=400, detail="Query missing in request")
# Run the agent over multiple dataframe
agent = create_pandas_dataframe_agent(llm , [Customers, Customers_Employees, Product_Holding ,
RM_KRAs , contacthistory , Persona , Employees], verbose=True,
max_iterations=7)
answer = agent.run(query['query'])
return answer
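# Illustrative only (not part of the original file): a small client-side helper for the endpoints
# above, assuming the app is served locally (e.g. `uvicorn MainRoutes:app --port 8000`); the sample
# question is a placeholder.
def _example_sql_query_client(question: str = "How many employees met their KRA targets?") -> dict:
    import requests  # local import: only needed when this helper is actually used
    resp = requests.post("http://localhost:8000/query_sql/", json={"query": question})
    resp.raise_for_status()
    return resp.json()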
| [] |
2024-01-10 | chethanreddy123/SalesKRA-Query-engine | BackEndRoutes~sample.py | from dotenv import load_dotenv
import os
import streamlit as st
import pandas as pd
from pandasai import PandasAI
import matplotlib.pyplot as plt
from langchain.llms import GooglePalm
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
max_output_tokens=8196,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
# llm = OpenAssistant(api_token=API_KEY)
# llm = Falcon(api_token=API_KEY)
# to disable pandasai.log: PandasAI(llm, enable_logging=False)
pandas_ai = PandasAI(llm)
st.title('Analysis CSV file with Ai')
uploaded_file = st.file_uploader('Upload your CSV file', type=['csv'])
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
st.write(df.head(3))
prompt = st.text_input('Enter your prompt')
if st.button('Generate'):
if prompt:
with st.spinner("generating response..."):
output = pandas_ai.run(df, prompt=prompt)
# Convert Axes object to Figure object
if isinstance(output, plt.Axes):
fig = output.figure
st.pyplot(fig)
else:
st.write(output)
else:
st.warning("Please enter your prompt.") | [
"Enter your prompt"
] |
2024-01-10 | chethanreddy123/SalesKRA-Query-engine | BackEndRoutes~DashBoard.py | import streamlit as st
from pathlib import Path
from langchain.llms.openai import OpenAI
from langchain.agents import create_sql_agent
from langchain.sql_database import SQLDatabase
from langchain.agents.agent_types import AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import GooglePalm
st.set_page_config(page_title="Report Generator", page_icon="🦜")
st.title("Report Generator for your KRAs and more")
db_uri = "sqlite:///data_KRA.sqlite"
# Setup agent
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
# The maximum length of the response
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
@st.cache_resource(ttl="2h")
def configure_db(db_uri):
return SQLDatabase.from_uri(database_uri=db_uri)
db = configure_db(db_uri)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
user_query = st.chat_input(placeholder="Ask me anything!")
if user_query:
st.session_state.messages.append({"role": "user", "content": user_query})
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container())
response = agent.run(user_query, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response) | [
"How can I help you?"
] |
2024-01-10 | chethanreddy123/SalesKRA-Query-engine | BackEndRoutes~BackEnd.py | from fastapi import FastAPI, HTTPException, Request, Response
import google.generativeai as palm
from fastapi.middleware.cors import CORSMiddleware
import pymongo
from pymongo.mongo_client import MongoClient
from loguru import logger
import random
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import (ConversationBufferMemory,
ConversationSummaryMemory,
ConversationBufferWindowMemory,
ConversationKGMemory)
from langchain.callbacks import get_openai_callback
from langchain.llms import GooglePalm
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
import json
from pymongo.mongo_client import MongoClient
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
llm = GooglePalm(
model='models/text-bison-001',
temperature=0,
# The maximum length of the response
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
MongoDB_Key = "mongodb://aioverflow:[email protected]:27017,ac-pu6wews-shard-00-01.me4dkct.mongodb.net:27017,ac-pu6wews-shard-00-02.me4dkct.mongodb.net:27017/?ssl=true&replicaSet=atlas-jcoztp-shard-0&authSource=admin&retryWrites=true&w=majority"
Data = MongoClient(MongoDB_Key)
EmployeeData = Data['FinalAxisBankHackathon']['EmployeeData']
KRAsData = Data['FinalAxisBankHackathon']['KRAsData']
def count_tokens(chain, query):
with get_openai_callback() as cb:
result = chain.run(query)
print(f'Spent a total of {cb.total_tokens} tokens')
return result
def load_json_file(file_path):
with open(file_path, 'r') as json_file:
json_string = json_file.read()
return json_string
documents = EmployeeData.find()
# Convert documents to JSON strings
json_string = [json.dumps(doc, default=str) for doc in documents]
# Initialize your LLM chain and conversation chain
llm_chat = GooglePalm(
model='models/text-bison-001',
temperature=0,
max_output_tokens=80000,
google_api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM'
)
conversation_buf = ConversationChain(
llm=llm_chat,
memory=ConversationBufferMemory()
)
# Initialize conversation_buf with the initial message
initial_message = f'''Good morning AI! You are an Expert
in Sales Key Result Areas (KRA) Setting and Performance Management.
You are here to help me with my queries regarding Sales Key Result Areas
(KRA) Setting and Performance Management, And all the sales employee data is
given below for future analysis: {json_string}'''
check = conversation_buf(initial_message)
palm.configure(api_key='AIzaSyA1fu-ob27CzsJozdr6pHd96t5ziaD87wM')
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
response = palm.chat(messages=["Jon is very good boy and works and microsoft"])
MongoDB_Key = "mongodb://aioverflow:[email protected]:27017,ac-pu6wews-shard-00-01.me4dkct.mongodb.net:27017,ac-pu6wews-shard-00-02.me4dkct.mongodb.net:27017/?ssl=true&replicaSet=atlas-jcoztp-shard-0&authSource=admin&retryWrites=true&w=majority"
Data = MongoClient(MongoDB_Key)
EmployeeData = Data['FinalAxisBankHackathon']['EmployeeData']
KRAsData = Data['FinalAxisBankHackathon']['KRAsData']
conversation_buf_react = ConversationChain(
llm=llm_chat,
memory=ConversationBufferMemory()
)
# Initialize conversation_buf with the initial message
initial_message_react = f'''Act like expert Data analyst and react developer'''
check_react = conversation_buf_react(initial_message_react)
############### Additional Functions #####################
def generate_random_id():
# Generate a random 6-digit number
random_number = random.randint(100000, 999999)
# Combine with the fixed string 'AXIS'
new_id = f'AXIS{random_number}'
return new_id
def generate_random_id_KRA():
# Generate a random 6-digit number
random_number = random.randint(100000, 999999)
# Combine with the fixed string 'AXIS'
new_id = f'KRA{random_number}'
return new_id
############### API Endpoints #####################
# Define API endpoint for chat
@app.post("/chat/")
async def chat(request: Request):
# json_data = await request.json()
# query = json_data.get("query")
# try:
# response = count_tokens(conversation_buf, query)
# return {"response": response}
# except Exception as e:
# return HTTPException(status_code=500, detail=str(e))
image_path = "Screenshot 2023-09-14 at 7.00.54 PM.png" # Update with your image file path
return FileResponse(image_path)
@app.post("/chat_react/")
async def chat(request: Request):
json_data = await request.json()
query = json_data.get("query")
try:
response = count_tokens(conversation_buf_react, query)
return {"response": response}
except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/AddEmployee/")
def AddEmployee(info: dict):
req_info = info
req_info = dict(req_info)
print(req_info)
logger.info("recieved new patient details")
req_info['personalInformation']['EmployeeID'] = generate_random_id()
req_info['ListOfKRAs'] = []
try:
Check = EmployeeData.insert_one(req_info)
        if Check.acknowledged:
            logger.info("employee added successfully")
            return {"status": "success", "EmployeeID": req_info['personalInformation']['EmployeeID']}
        else:
            logger.info("employee not added")
            return {"status": "failed"}
except Exception as e:
logger.error(e)
return {"status": "failed"}
@app.post("/GetEmployee/")
def GetEmployee(info : dict):
req_info = info
req_info = dict(req_info)
EmployeeID = req_info['EmployeeID']
Result = EmployeeData.find_one({"personalInformation.EmployeeID": EmployeeID})
if Result is None:
return {"status": "failed"}
else:
del Result['_id']
return Result
@app.post("/AddKRAtoEmployee/")
def AddKRAtoEmployee(info : dict):
req_info = info
req_info = dict(req_info)
EmployeeID = req_info['EmployeeID']
KRAID = req_info['KRAID']
Result = EmployeeData.find_one({"personalInformation.EmployeeID": EmployeeID})
if Result is None:
return {"status": "failed"}
else:
del Result['_id']
Result['ListOfKRAs'].append(KRAID)
Check = EmployeeData.update_one({"personalInformation.EmployeeID": EmployeeID}, {"$set": Result})
if Check.acknowledged == True:
return {"status": "success"}
else:
return {"status": "failed"}
@app.get("/GetAllEmployees/")
def GetAllEmployees():
logger.info("recieved all employee details")
Result = list(EmployeeData.find({}))
if Result is None:
return {"status": "failed"}
else:
for i in Result:
del i['_id']
return Result
@app.post("/AddKRA/")
def AddKRA(info : dict):
req_info = info
req_info = dict(req_info)
print(req_info)
logger.info("recieved new patient details")
req_info['KRAID'] = generate_random_id_KRA()
try:
Check = KRAsData.insert_one(req_info)
        if Check.acknowledged:
            logger.info("KRA added successfully")
            return {"status": "success", "KRAID": req_info['KRAID']}
        else:
            logger.info("KRA not added")
            return {"status": "failed"}
except Exception as e:
logger.error(e)
return {"status": "failed"}
@app.get("/GetAllKRAs/")
def GetAllKRAs():
logger.info("recieved all employee details")
Result = list(KRAsData.find({}, {'KRAID': 1, '_id': 0, 'title' : 1}))
if Result is None:
return {"status": "failed"}
else:
for i in Result:
i['value'] = i['KRAID'] + " " + i['title']
i["label"] = i['KRAID'] + " " + i['title']
del i['KRAID']
del i['title']
return Result
@app.get("/GetAllKRAsData/")
def GetAllKRAsData():
logger.info("recieved all employee details")
Result = list(KRAsData.find({}))
if Result is None:
return {"status": "failed"}
else:
for i in Result:
del i['_id']
return Result
@app.post("/GetKRA/")
def GetKRA(info : dict):
req_info = info
req_info = dict(req_info)
KRA_ID = req_info['KRAID']
Result = KRAsData.find_one({"KRAID": KRA_ID})
if Result is None:
return {"status": "failed"}
else:
del Result['_id']
return Result
@app.post("/GetKRAsForEmployee/")
def GetKRAsForEmployee(info : dict):
req_info = info
req_info = dict(req_info)
EmployeeID = req_info['EmployeeID']
Result = EmployeeData.find_one({"personalInformation.EmployeeID": EmployeeID})
if Result is None:
return {"status": "failed"}
else:
del Result['_id']
return Result['ListOfKRAs'] | [
"['Jon is very good boy and works and microsoft']"
] |
2024-01-10 | ergv03/chat-with-pdf-llm | app_chat.py | import streamlit as st
import os
from constants import search_number_messages
from langchain_utils import initialize_chat_conversation
from search_indexing import download_and_index_pdf
import re
def remove_url(url_to_remove):
"""
Remove URLs from the session_state. Triggered by the respective button
"""
if url_to_remove in st.session_state.urls:
st.session_state.urls.remove(url_to_remove)
# Page title
st.set_page_config(page_title='Talk with PDFs using LLMs - Beta')
st.title('Talk with PDFs using LLMs - (Beta)')
# Initialize the faiss_index key in the session state. This can be used to avoid having to download and embed the same PDF
# every time the user asks a question
if 'faiss_index' not in st.session_state:
st.session_state['faiss_index'] = {
'indexed_urls': [],
'index': None
}
# Initialize conversation memory used by Langchain
if 'conversation_memory' not in st.session_state:
st.session_state['conversation_memory'] = None
# Initialize chat history used by StreamLit (for display purposes)
if "messages" not in st.session_state:
st.session_state.messages = []
# Store the URLs added by the user in the UI
if 'urls' not in st.session_state:
st.session_state.urls = []
with st.sidebar:
openai_api_key = st.text_input('Step 1 - OpenAI API Key:', type='password')
# Add/Remove URLs form
with st.form('urls-form', clear_on_submit=True):
url = st.text_input('Step 2 - URLs to relevant PDFs: ')
add_url_button = st.form_submit_button('Add')
if add_url_button:
if url not in st.session_state.urls:
st.session_state.urls.append(url)
# Display a container with the URLs added by the user so far
with st.container():
if st.session_state.urls:
st.header('URLs added:')
for url in st.session_state.urls:
st.write(url)
st.button(label='Remove', key=f"Remove {url}", on_click=remove_url, kwargs={'url_to_remove': url})
st.divider()
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# React to user input
if query_text := st.chat_input("Your message"):
os.environ['OPENAI_API_KEY'] = openai_api_key
# Display user message in chat message container, and append to session state
st.chat_message("user").markdown(query_text)
st.session_state.messages.append({"role": "user", "content": query_text})
# Check if FAISS index already exists, or if it needs to be created as it includes new URLs
session_urls = st.session_state.urls
if st.session_state['faiss_index']['index'] is None or set(st.session_state['faiss_index']['indexed_urls']) != set(session_urls):
st.session_state['faiss_index']['indexed_urls'] = session_urls
with st.spinner('Downloading and indexing PDFs...'):
faiss_index = download_and_index_pdf(session_urls)
st.session_state['faiss_index']['index'] = faiss_index
else:
faiss_index = st.session_state['faiss_index']['index']
# Check if conversation memory has already been initialized and is part of the session state
if st.session_state['conversation_memory'] is None:
conversation = initialize_chat_conversation(faiss_index)
st.session_state['conversation_memory'] = conversation
else:
conversation = st.session_state['conversation_memory']
# Search PDF snippets using the last few user messages
user_messages_history = [message['content'] for message in st.session_state.messages[-search_number_messages:] if message['role'] == 'user']
user_messages_history = '\n'.join(user_messages_history)
with st.spinner('Querying OpenAI GPT...'):
response = conversation.predict(input=query_text, user_messages_history=user_messages_history)
# Display assistant response in chat message container
with st.chat_message("assistant"):
st.markdown(response)
snippet_memory = conversation.memory.memories[1]
for page_number, snippet in zip(snippet_memory.pages, snippet_memory.snippets):
with st.expander(f'Snippet from page {page_number + 1}'):
# Remove the <START> and <END> tags from the snippets before displaying them
snippet = re.sub("<START_SNIPPET_PAGE_\d+>", '', snippet)
snippet = re.sub("<END_SNIPPET_PAGE_\d+>", '', snippet)
st.markdown(snippet)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": response})
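# Illustrative only (not part of the original file): app_chat.py imports several tunables from a
# `constants` module that is not included in this dump (and initialize_chat_conversation from
# langchain_utils). A hypothetical constants.py might look like the sketch below; the values are
# placeholders, not the repository's actual settings.
#
#   search_number_messages = 3        # recent user messages used to query the FAISS index
#   chunk_size = 1000                 # character chunk size for the text splitter
#   chunk_overlap = 200               # overlap between consecutive chunks
#   number_snippets_to_retrieve = 5   # top-k snippets returned by similarity search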
| [] |
2024-01-10 | ergv03/chat-with-pdf-llm | search_indexing.py | from langchain import FAISS
from langchain.document_loaders import PyPDFium2Loader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
import pypdfium2 as pdfium
from constants import chunk_size, chunk_overlap, number_snippets_to_retrieve
def download_and_index_pdf(urls: list[str]) -> FAISS:
"""
Download and index a list of PDFs based on the URLs
"""
def __update_metadata(pages, url):
"""
Add to the document metadata the title and original URL
"""
for page in pages:
pdf = pdfium.PdfDocument(page.metadata['source'])
title = pdf.get_metadata_dict().get('Title', url)
page.metadata['source'] = url
page.metadata['title'] = title
return pages
all_pages = []
for url in urls:
loader = PyPDFium2Loader(url)
splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
pages = loader.load_and_split(splitter)
pages = __update_metadata(pages, url)
all_pages += pages
faiss_index = FAISS.from_documents(all_pages, OpenAIEmbeddings())
return faiss_index
def search_faiss_index(faiss_index: FAISS, query: str, top_k: int = number_snippets_to_retrieve) -> list:
"""
Search a FAISS index, using the passed query
"""
docs = faiss_index.similarity_search(query, k=top_k)
return docs
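# Illustrative only (not part of the original file): a short sketch tying the two helpers together.
# It assumes OPENAI_API_KEY is set in the environment and uses a placeholder PDF URL.
def _example_index_and_search():
    urls = ["https://arxiv.org/pdf/1706.03762.pdf"]  # placeholder PDF URL
    index = download_and_index_pdf(urls)
    for doc in search_faiss_index(index, "What architecture does the paper propose?"):
        print(doc.metadata.get("title"), "-", doc.page_content[:80])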
| [] |
2024-01-10 | midnight-learners/linguAML | linguaml~cli~tune~llm~tune.py | # Imports from this package
from .config import TuningSettings
def tune(tuning_settings: TuningSettings) -> None:
# Set up the environment
from linguaml.rl.env import Env
from linguaml.data.dataset import load_dataset
env = Env(
datasets=[
load_dataset(name=name)
for name in tuning_settings.dataset_names
],
performance_metric=tuning_settings.performance_metric,
lookback=tuning_settings.lookback,
fitting_time_limit=tuning_settings.fitting_time_limit,
random_state=tuning_settings.random_state
)
# LLM Agent
from linguaml.llm.agent import Agent
from linguaml.tolearn.family import Family
from linguaml.llm.openai.chat import OpenAIChatModel
agent = Agent(
family=Family.from_name(tuning_settings.family_name),
numeric_hp_bounds=tuning_settings.numeric_hp_bounds,
chat_model=OpenAIChatModel(
model_name=tuning_settings.chat_model_name,
temperature=tuning_settings.temperature
)
)
# Create an LLM tuner
from linguaml.tuners import LLMTuner
from linguaml.rl.replay_buffer import ReplayBuffer
from linguaml.tolearn.performance import PerformanceResultBuffer
tuner = LLMTuner(
env=env,
agent=agent,
replay_buffer=ReplayBuffer(
capacity=tuning_settings.replay_buffer_capacity
),
performance_result_buffer=PerformanceResultBuffer(
capacity=tuning_settings.performance_result_buffer_capacity
)
)
# Tune!
tuner.tune(
n_epochs=tuning_settings.n_epochs
)
| [] |
2024-01-10 | midnight-learners/linguAML | linguaml~cli~tune~hybrid~tune.py | from .config import TuningSettings
def tune(tuning_settings: TuningSettings) -> None:
# Environment
from linguaml.rl.env import Env
from linguaml.data.dataset import load_dataset
env = Env(
datasets=[
load_dataset(name=name)
for name in tuning_settings.dataset_names
],
performance_metric=tuning_settings.performance_metric,
lookback=tuning_settings.lookback,
fitting_time_limit=tuning_settings.fitting_time_limit,
random_state=tuning_settings.random_state
)
# RL agent
from linguaml.rl.agent import Agent as RLAgent
from linguaml.tolearn.family import Family
rl_agent = RLAgent(
family=Family.from_name(tuning_settings.family_name),
numeric_hp_bounds=tuning_settings.numeric_hp_bounds,
hidden_size=tuning_settings.hidden_size,
cont_dist_family=tuning_settings.cont_dist_family,
)
# LLM agent
from linguaml.llm.agent import Agent as LLMAgent
from linguaml.llm.openai.chat import OpenAIChatModel
llm_agent = LLMAgent(
family=Family.from_name(tuning_settings.family_name),
numeric_hp_bounds=tuning_settings.numeric_hp_bounds,
chat_model=OpenAIChatModel(
model_name=tuning_settings.chat_model_name,
temperature=tuning_settings.temperature
)
)
# Advantage calulator
from linguaml.rl.advantage import AdvantageCalculator
advantage_calculator = AdvantageCalculator(
moving_average_alg=tuning_settings.moving_average_alg,
period=tuning_settings.sma_period,
alpha=tuning_settings.ema_alpha
)
# Create a hybrid tuner
from linguaml.tuners import HybridTuner
from linguaml.rl.replay_buffer import ReplayBuffer
from linguaml.tolearn.performance import PerformanceResultBuffer
tuner = HybridTuner(
env=env,
rl_agent=rl_agent,
llm_agent=llm_agent,
replay_buffer=ReplayBuffer(
capacity=tuning_settings.replay_buffer_capacity
),
performance_result_buffer=PerformanceResultBuffer(
capacity=tuning_settings.performance_result_buffer_capacity
),
advantage_calculator=advantage_calculator,
llm_agent_sampling_freq=tuning_settings.llm_agent_sampling_freq
)
# Tune!
from torch.optim import Adam
tuner.tune(
n_epochs=tuning_settings.n_epochs,
batch_size=tuning_settings.batch_size,
min_batch_size=tuning_settings.min_batch_size,
n_steps_for_updating_agent=tuning_settings.n_steps_for_updating_agent,
optimizer=Adam(
rl_agent.parameters(),
lr=tuning_settings.adam_lr
),
ppo_epsilon=tuning_settings.ppo_epsilon
)
| [] |
2024-01-10 | nirgodin/radio-stations-data-collection | data_collectors~components~collectors~collectors_component_factory.py | from data_collectors.components.collectors.billboard_collectors_component_factory import \
BillboardCollectorsComponentFactory
from data_collectors.components.collectors.genius_collectors_component_factory import GeniusCollectorsComponentFactory
from data_collectors.components.collectors.google_collectors_component_factory import GoogleCollectorsComponentFactory
from data_collectors.components.collectors.musixmatch_collectors_component_factory import \
MusixmatchCollectorsComponentFactory
from data_collectors.components.collectors.openai_collectors_component_factory import OpenAICollectorsComponentFactory
from data_collectors.components.collectors.shazam_collectors_component_factory import ShazamCollectorsComponentFactory
from data_collectors.components.collectors.spotify_collectors_component_factory import SpotifyCollectorsComponentFactory
from data_collectors.components.collectors.wikipedia_collectors_component_factory import \
WikipediaCollectorsComponentFactory
class CollectorsComponentFactory:
def __init__(self,
billboard: BillboardCollectorsComponentFactory = BillboardCollectorsComponentFactory(),
genius: GeniusCollectorsComponentFactory = GeniusCollectorsComponentFactory(),
google: GoogleCollectorsComponentFactory = GoogleCollectorsComponentFactory(),
musixmatch: MusixmatchCollectorsComponentFactory = MusixmatchCollectorsComponentFactory(),
openai: OpenAICollectorsComponentFactory = OpenAICollectorsComponentFactory(),
shazam: ShazamCollectorsComponentFactory = ShazamCollectorsComponentFactory(),
spotify: SpotifyCollectorsComponentFactory = SpotifyCollectorsComponentFactory(),
wikipedia: WikipediaCollectorsComponentFactory = WikipediaCollectorsComponentFactory()):
self.billboard = billboard
self.genius = genius
self.google = google
self.musixmatch = musixmatch
self.openai = openai
self.shazam = shazam
self.spotify = spotify
self.wikipedia = wikipedia
| [] |
2024-01-10 | nirgodin/radio-stations-data-collection | data_collectors~logic~collectors~openai~track_names_embeddings_collector.py | from typing import List, Dict, Optional
from genie_common.models.openai import EmbeddingsModel
from genie_common.openai import OpenAIClient
from genie_common.tools import logger
from genie_common.utils import merge_dicts
from sqlalchemy.ext.asyncio import AsyncEngine
from data_collectors.contract import ICollector
from data_collectors.logic.models import MissingTrack
from genie_common.tools import AioPoolExecutor
class TrackNamesEmbeddingsCollector(ICollector):
def __init__(self, db_engine: AsyncEngine, pool_executor: AioPoolExecutor, openai_client: OpenAIClient):
self._db_engine = db_engine
self._openai_client = openai_client
self._pool_executor = pool_executor
async def collect(self, missing_tracks: List[MissingTrack]) -> Dict[MissingTrack, Optional[List[float]]]:
logger.info(f"Starting to collect embeddings for {len(missing_tracks)} tracks")
results = await self._pool_executor.run(
iterable=missing_tracks,
func=self._get_single_name_embeddings,
expected_type=dict
)
return merge_dicts(*results)
async def _get_single_name_embeddings(self, missing_track: MissingTrack) -> Dict[MissingTrack, Optional[List[float]]]:
embeddings = await self._openai_client.embeddings.collect(
text=missing_track.track_name,
model=EmbeddingsModel.ADA
)
return {missing_track: embeddings}
| [] |
2024-01-10 | paulovsm/store-api-nestjs | query_vector_db.py | import os
import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
# initialize pinecone
pinecone.init(
api_key=os.getenv("PINECONE_API_KEY"), # find at app.pinecone.io
environment=os.getenv("PINECONE_ENV"), # next to api key in console
)
index_name = "fleek-authority-index"
# First, check if our index already exists. If it doesn't, we create it
if index_name not in pinecone.list_indexes():
# we create a new index
pinecone.create_index(
name=index_name,
metric='cosine',
dimension=1536
)
# if you already have an index, you can load it like this
docsearch = Pinecone.from_existing_index(index_name, embeddings)
query = "Try wearing Allen Edmonds Men's Park Avenue Cap-Toe Oxfords. These black, classic leather shoes are handcrafted and made with high attention to detail. Their sleek, lace-up design adds a formal and quintessential look to any outfit."
docs = docsearch.similarity_search(query, 10)
print(len(docs))
for doc in docs:
print(doc.page_content)
print(doc.metadata) | [] |
2024-01-10 | Pacatro/ChatBot-SIF | functions~functions.py | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import os
import openai
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from dotenv import load_dotenv
def bot_response(message):
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
description = """
Eres un chatbot dedicado a la atención al cliente de una empresa llamada FixNexus dedicada a la venta
y reparación de productos tecnológicos como mandos de consola, ordenadores, periféricos, etc. Tu labor
será la de atender todas las dudas de los clientes de la empresa de la manera más educada y eficiente
posible.
"""
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role": "system", "content": description},
{"role": "user", "content": message}
]
)
return response['choices'][0]['message']['content']
def send_email(username, user_email, user_message, receiver_email):
load_dotenv()
mail_content = ""
subject = ""
if receiver_email == os.getenv("FIXNEXUS_ADDRESS"):
subject = f"El usuario {username} quiere contactar con nosotros"
mail_content = f"El usuario {username} con email {user_email} quiere contactar con nosotros por:\n{user_message}"
else:
subject = "Hemos recibido tu mensaje!"
mail_content = f"Hola {username}!\n Hemos enviado tu sugerencia a nuestro equipo de soporte y te responderemos lo antes posible."
sender_email = os.getenv("FIXNEXUS_ADDRESS")
sender_pass = os.getenv("FIXNEXUS_PASS")
message = MIMEMultipart()
message['From'] = sender_email
message['To'] = receiver_email
message['Subject'] = subject
message.attach(MIMEText(mail_content, 'plain'))
session = smtplib.SMTP('smtp-mail.outlook.com', 587)
session.starttls()
session.login(sender_email, sender_pass)
text = message.as_string()
session.sendmail(sender_email, receiver_email, text)
session.quit()
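# Illustrative only (not part of the original file): a quick sketch of how these helpers might be
# used together. The customer name, addresses, and message are placeholders; OPENAI_API_KEY,
# FIXNEXUS_ADDRESS and FIXNEXUS_PASS must be present in the .env file.
if __name__ == "__main__":
    load_dotenv()
    print(bot_response("¿Cuánto tarda la reparación de un mando de consola?"))
    send_email("Ana", "[email protected]", "Mi portátil no enciende", os.getenv("FIXNEXUS_ADDRESS"))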
| [] |
2024-01-10 | webclinic017/live-dd-reports | prototype.py | ################################################################################
### Step 1
################################################################################
import requests
import re
import urllib.request
from bs4 import BeautifulSoup
from collections import deque
from html.parser import HTMLParser
from urllib.parse import urlparse
import os
import pandas as pd
import tiktoken
import openai
import numpy as np
import os
from openai.embeddings_utils import distances_from_embeddings, cosine_similarity
# Regex pattern to match a URL
HTTP_URL_PATTERN = r'^http[s]{0,1}://.+$'
# Define root domain to crawl
domain = "www.flexlng.com"
full_url = "https://www.flexlng.com/flex-lng-fourth-quarter-2022-earnings-release/"
# Create a class to parse the HTML and get the hyperlinks
class HyperlinkParser(HTMLParser):
def __init__(self):
super().__init__()
# Create a list to store the hyperlinks
self.hyperlinks = []
# Override the HTMLParser's handle_starttag method to get the hyperlinks
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
# If the tag is an anchor tag and it has an href attribute, add the href attribute to the list of hyperlinks
if tag == "a" and "href" in attrs:
self.hyperlinks.append(attrs["href"])
################################################################################
### Step 2
################################################################################
# Function to get the hyperlinks from a URL
def get_hyperlinks(url):
# Try to open the URL and read the HTML
try:
# Open the URL and read the HTML
with urllib.request.urlopen(url) as response:
# If the response is not HTML, return an empty list
if not response.info().get('Content-Type').startswith("text/html"):
return []
# Decode the HTML
html = response.read().decode('utf-8')
except Exception as e:
print(e)
return []
# Create the HTML Parser and then Parse the HTML to get hyperlinks
parser = HyperlinkParser()
parser.feed(html)
return parser.hyperlinks
################################################################################
### Step 3
################################################################################
# Function to get the hyperlinks from a URL that are within the same domain
def get_domain_hyperlinks(local_domain, url):
clean_links = []
for link in set(get_hyperlinks(url)):
clean_link = None
# If the link is a URL, check if it is within the same domain
if re.search(HTTP_URL_PATTERN, link):
# Parse the URL and check if the domain is the same
url_obj = urlparse(link)
if url_obj.netloc == local_domain:
clean_link = link
# If the link is not a URL, check if it is a relative link
else:
if link.startswith("/"):
link = link[1:]
elif (
link.startswith("#")
or link.startswith("mailto:")
or link.startswith("tel:")
):
continue
clean_link = "https://" + local_domain + "/" + link
if clean_link is not None:
if clean_link.endswith("/"):
clean_link = clean_link[:-1]
clean_links.append(clean_link)
# Return the list of hyperlinks that are within the same domain
return list(set(clean_links))
from urllib.parse import urlparse
from collections import deque
import os
from bs4 import BeautifulSoup
import requests
def crawl_single_page(url):
# Parse the URL and get the domain
local_domain = urlparse(url).netloc
# Create a directory to store the text files
if not os.path.exists("text/"):
os.mkdir("text/")
if not os.path.exists("text/"+local_domain+"/"):
os.mkdir("text/" + local_domain + "/")
# Create a directory to store the csv files
if not os.path.exists("processed"):
os.mkdir("processed")
# Ensure the directory exists
directory = os.path.join('text', local_domain)
os.makedirs(directory, exist_ok=True)
# Create the file path using os.path.join()
file_path = os.path.join(directory, url[8:].replace("/", "_") + ".txt")
try:
# Attempt to open the file
# Save text from the url to a <url>.txt file
with open(file_path, "w", encoding="UTF-8") as f:
# Your code to write the content
# Your code to write the content
# Get the text from the URL using BeautifulSoup
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Get the text but remove the tags
text = soup.get_text()
# If the crawler gets to a page that requires JavaScript, it will stop the crawl
if ("You need to enable JavaScript to run this app." in text):
print("Unable to parse page " + url + " due to JavaScript being required")
# Otherwise, write the text to the file in the text directory
f.write(text)
except Exception as e:
print(f"Skipping file {file_path} due to an error: {e}")
################################################################################
### Step 4
################################################################################
def crawl(url):
# Parse the URL and get the domain
local_domain = urlparse(url).netloc
# Create a queue to store the URLs to crawl
queue = deque([url])
# Create a set to store the URLs that have already been seen (no duplicates)
seen = set([url])
# Create a directory to store the text files
if not os.path.exists("text/"):
os.mkdir("text/")
if not os.path.exists("text/"+local_domain+"/"):
os.mkdir("text/" + local_domain + "/")
# Create a directory to store the csv files
if not os.path.exists("processed"):
os.mkdir("processed")
# While the queue is not empty, continue crawling
while queue:
# Get the next URL from the queue
url = queue.pop()
print(url) # for debugging and to see the progress
# Ensure the directory exists
directory = os.path.join('text', local_domain)
os.makedirs(directory, exist_ok=True)
# Create the file path using os.path.join()
file_path = os.path.join(directory, url[8:].replace("/", "_") + ".txt")
try:
# Attempt to open the file
# Save text from the url to a <url>.txt file
with open(file_path, "w", encoding="UTF-8") as f:
# Your code to write the content
# Your code to write the content
# Get the text from the URL using BeautifulSoup
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Get the text but remove the tags
text = soup.get_text()
# If the crawler gets to a page that requires JavaScript, it will stop the crawl
if ("You need to enable JavaScript to run this app." in text):
print("Unable to parse page " + url + " due to JavaScript being required")
# Otherwise, write the text to the file in the text directory
f.write(text)
except Exception as e:
print(f"Skipping file {file_path} due to an error: {e}")
# Get the hyperlinks from the URL and add them to the queue
for link in get_domain_hyperlinks(local_domain, url):
if link not in seen:
queue.append(link)
seen.add(link)
#crawl(full_url)
crawl_single_page(full_url)
################################################################################
### Step 5
################################################################################
def remove_newlines(serie):
serie = serie.str.replace('\n', ' ')
serie = serie.str.replace('\\n', ' ')
    serie = serie.str.replace('  ', ' ')
    serie = serie.str.replace('  ', ' ')
return serie
################################################################################
### Step 6
################################################################################
# Create a list to store the text files
texts=[]
# Get all the text files in the text directory
for file in os.listdir(os.path.join("text", domain)):
# Create the file path using os.path.join()
file_path = os.path.join("text", domain, file)
# Open the file and read the text
with open(file_path, "r", encoding="UTF-8") as f:
text = f.read()
    # Omit the first 11 characters and the last 4 characters ('.txt'), then replace -, _, and #update with spaces.
texts.append((file[11:-4].replace('-', ' ').replace('_', ' ').replace('#update', ''), text))
# Create a dataframe from the list of texts
df = pd.DataFrame(texts, columns = ['fname', 'text'])
# Set the text column to be the raw text with the newlines removed
df['text'] = df.fname + ". " + remove_newlines(df.text)
df.to_csv('processed/scraped.csv')
df.head()
################################################################################
### Step 7
################################################################################
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
tokenizer = tiktoken.get_encoding("cl100k_base")
df = pd.read_csv('processed/scraped.csv', index_col=0)
df.columns = ['title', 'text']
# Tokenize the text and save the number of tokens to a new column
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
# Visualize the distribution of the number of tokens per row using a histogram
df.n_tokens.hist()
################################################################################
### Step 8
################################################################################
max_tokens = 500
# Function to split the text into chunks of a maximum number of tokens
def split_into_many(text, max_tokens = max_tokens):
# Split the text into sentences
sentences = text.split('. ')
# Get the number of tokens for each sentence
n_tokens = [len(tokenizer.encode(" " + sentence)) for sentence in sentences]
chunks = []
tokens_so_far = 0
chunk = []
# Loop through the sentences and tokens joined together in a tuple
for sentence, token in zip(sentences, n_tokens):
# If the number of tokens so far plus the number of tokens in the current sentence is greater
# than the max number of tokens, then add the chunk to the list of chunks and reset
# the chunk and tokens so far
if tokens_so_far + token > max_tokens:
chunks.append(". ".join(chunk) + ".")
chunk = []
tokens_so_far = 0
# If the number of tokens in the current sentence is greater than the max number of
# tokens, go to the next sentence
if token > max_tokens:
continue
# Otherwise, add the sentence to the chunk and add the number of tokens to the total
chunk.append(sentence)
tokens_so_far += token + 1
# Add the last chunk to the list of chunks
if chunk:
chunks.append(". ".join(chunk) + ".")
return chunks
shortened = []
# Loop through the dataframe
for row in df.iterrows():
# If the text is None, go to the next row
if row[1]['text'] is None:
continue
# If the number of tokens is greater than the max number of tokens, split the text into chunks
if row[1]['n_tokens'] > max_tokens:
shortened += split_into_many(row[1]['text'])
# Otherwise, add the text to the list of shortened texts
else:
shortened.append( row[1]['text'] )
################################################################################
### Step 9
################################################################################
df = pd.DataFrame(shortened, columns = ['text'])
df['n_tokens'] = df.text.apply(lambda x: len(tokenizer.encode(x)))
df.n_tokens.hist()
################################################################################
### Step 10
################################################################################
# Note that you may run into rate limit issues depending on how many files you try to embed
# Please check out our rate limit guide to learn more on how to handle this: https://platform.openai.com/docs/guides/rate-limits
df['embeddings'] = df.text.apply(lambda x: openai.Embedding.create(input=x, engine='text-embedding-ada-002')['data'][0]['embedding'])
df.to_csv('processed/embeddings.csv')
df.head()
################################################################################
### Step 11
################################################################################
df=pd.read_csv('processed/embeddings.csv', index_col=0)
df['embeddings'] = df['embeddings'].apply(eval).apply(np.array)
df.head()
################################################################################
### Step 12
################################################################################
def create_context(
question, df, max_len=1800, size="ada"
):
"""
Create a context for a question by finding the most similar context from the dataframe
"""
# Get the embeddings for the question
q_embeddings = openai.Embedding.create(input=question, engine='text-embedding-ada-002')['data'][0]['embedding']
# Get the distances from the embeddings
df['distances'] = distances_from_embeddings(q_embeddings, df['embeddings'].values, distance_metric='cosine')
returns = []
cur_len = 0
# Sort by distance and add the text to the context until the context is too long
for i, row in df.sort_values('distances', ascending=True).iterrows():
# Add the length of the text to the current length
cur_len += row['n_tokens'] + 4
# If the context is too long, break
if cur_len > max_len:
break
# Else add it to the text that is being returned
returns.append(row["text"])
# Return the context
return "\n\n###\n\n".join(returns)
def answer_question(
df,
model="text-davinci-003",
question="Am I allowed to publish model outputs to Twitter, without a human review?",
max_len=1800,
size="ada",
debug=False,
max_tokens=2000,
stop_sequence=None
):
"""
Answer a question based on the most similar context from the dataframe texts
"""
context = create_context(
question,
df,
max_len=max_len,
size=size,
)
# If debug, print the raw model response
if debug:
print("Context:\n" + context)
print("\n\n")
try:
# Create a completions using the questin and context
response = openai.Completion.create(
prompt=f"Answer the question based on the context below\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
temperature=0,
max_tokens=max_tokens,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_sequence,
model=model,
)
return response["choices"][0]["text"].strip()
except Exception as e:
print(e)
return ""
################################################################################
### Step 13
################################################################################
print(answer_question(df, question="Can you summarize in 2 paragraphs?", debug=True))
#print(answer_question(df, question="What are the 2022 financial ratios?", debug=True))
#print(answer_question(df, question="What is our newest embeddings model?")) | [
"Answer the question based on the context below\n\nContext: PLACEHOLDER\n\n---\n\nQuestion: PLACEHOLDER\nAnswer:"
] |
2024-01-10 | TIGER-AI-Lab/UniIR | src~models_scripts~uniir_clip~clip_featurefusion~clip_ff.py | """
Feature level fusion model using CLIP
Code adapted from OpenAI's CLIP codebase
"""
from typing import Any, Optional, Tuple, Union
from dataclasses import dataclass
# Import from third library
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed.nn
from transformers.models.t5 import T5Config
from transformers.models.t5.modeling_t5 import T5Block, T5Stack
import clip
from clip.model import VisionTransformer
# Import from project
from models_scripts.uniir_clip.clip_featurefusion.clip_ff_conf import conf_t5, conf_t5_vit_large
class VisionTransformerWithoutPooling(VisionTransformer):
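    """Identical to CLIP's VisionTransformer except that forward() returns the full token
    sequence ([CLS] + patch embeddings) instead of pooling down to the [CLS] token."""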
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__(input_resolution, patch_size, width, layers, heads, output_dim)
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
# x = self.ln_post(x[:, 0, :])
x = self.ln_post(x)
if self.proj is not None:
x = x @ self.proj
return x
class CLIPFeatureFusion_OPENAI(nn.Module):
""" CLIP Feature Fusion model implemented using OpenAI's CLIP and HuggingFace Transformers' T5 models
"""
def __init__(self, model_name="ViT-B/32", device="cuda", jit=False, download_root=None, config=None):
super().__init__()
# Load pre-trained CLIP model
self.clip_model, self.img_preprocess_fn = clip.load(
model_name, device, jit, download_root=download_root)
self.tokenizer = clip.tokenize
self.loss_function = nn.CrossEntropyLoss()
self.t5_layers = T5Stack(conf_t5) if model_name == "ViT-B/32" else T5Stack(conf_t5_vit_large)
if config is not None:
self.gather_embeddings = config.model.gather_embeddings
self.in_batch_neg_num = config.data_config.in_batch_neg_num
else:
self.gather_embeddings = None
self.in_batch_neg_num = None
# to avoid unused parameters warning when doing distributed training
del self.clip_model.text_projection
state_dict = self.clip_model.visual.state_dict()
self.clip_model.visual = self.get_vision_transformer(model_name=model_name)
self.clip_model.visual.load_state_dict(state_dict)
self.clip_model.float()
def get_vision_transformer(self, model_name="VIT-B/32"):
if model_name == "ViT-B/32":
return VisionTransformerWithoutPooling(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512
)
elif model_name == "ViT-L/14":
return VisionTransformerWithoutPooling(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768
)
else:
raise NotImplementedError("Only ViT-B/32 and ViT-L/14 are supported.")
def get_img_preprocess_fn(self):
return self.img_preprocess_fn
def get_tokenizer(self):
""" Get the tokenize function used by the CLIP model
"""
def tokenizer_wrapper(txt):
tokenizer = self.tokenizer
txt_tensor = tokenizer(txt, context_length=77, truncate=True)
return txt_tensor
return tokenizer_wrapper
def encode_text(self, text_tensor):
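        # Unlike the stock CLIP encode_text, this returns per-token features after ln_final
        # (no EOT-token pooling and no text projection, which is deleted in __init__).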
x = self.clip_model.token_embedding(text_tensor).type(self.clip_model.dtype) # [batch_size, n_ctx, d_model]
x = x + self.clip_model.positional_embedding.type(self.clip_model.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.clip_model.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.clip_model.ln_final(x).type(self.clip_model.dtype)
return x
def encode_image(self, image_tensor):
return self.clip_model.encode_image(image_tensor) # [batch_size, seq_len, embed_dim]
def encode_multimodal_input(self, txt_tensor, img_tensor, txt_mask, img_mask):
""" Encode multimodal input using CLIP and T5 models
Args:
txt_tensor (_type_): text tensor [batch_size, seq_len]
img_tensor (_type_): image tensor [batch_size, 3, img_size, img_size]
txt_mask (_type_): text mask [batch_size]
img_mask (_type_): image mask [batch_size]
Returns:
multimodal_emb (_type_): _description_ multimodal embeddings [batch_size, embed_dim]
"""
txt_feat = self.encode_text(txt_tensor)
# txt_feat = txt_feat * txt_mask.unsqueeze(-1).unsqueeze(-1).expand_as(txt_feat)
img_feat = self.encode_image(img_tensor)
# img_feat = img_feat * img_mask.unsqueeze(-1).unsqueeze(-1).expand_as(img_feat)
combined_features = torch.cat([txt_feat, img_feat], dim=1) # shape: [batch_size, seq_len, embed_dim]
# combined_features = self.dense_clip_to_t5(combined_features)
transformer_output = self.t5_layers(
inputs_embeds=combined_features,
attention_mask=None,
use_cache=False,
return_dict=True
)
def mean_pooling(embeddings):
return torch.mean(embeddings, dim=1)
# Pool the output of the T5 transformer to get the final features
multimodal_emb = mean_pooling(transformer_output.last_hidden_state)
return multimodal_emb # shape: [batch_size, embed_dim]
def get_logit_scale(self):
return self.clip_model.logit_scale.exp()
def compute_inbatch_contrastive_loss(self, batch):
""" Compute the in-batch contrastive loss
Args:
batch (dict): batch dictionary consists of "txt_batch", "image_batch",
"txt_mask_batch", "image_mask_batch", "index_mapping"
Returns:
outputs (dict): dictionary of loss and accuracy
"""
txt_batched = batch["txt_batched"]
image_batched = batch["image_batched"]
txt_mask_batched = batch["txt_mask_batched"]
image_mask_batched = batch["image_mask_batched"]
index_mapping = batch["index_mapping"]
enable_hard_neg = "neg_cand_list" in index_mapping
# Compute embeddings
embeddings = self.encode_multimodal_input(txt_batched, image_batched, txt_mask_batched, image_mask_batched)
# Extract embeddings
q_embeds = embeddings[torch.tensor(index_mapping["query"]).flatten()] # shape: [bs, embed_dim]
p_embeds = embeddings[torch.tensor(index_mapping["pos_cand"]).flatten()] # shape: [bs, embed_dim]
n_embeds = None
if enable_hard_neg:
n_embeds = embeddings[torch.tensor(index_mapping["neg_cand_list"])] # [bs, neg_num, embed_dim]
bs = q_embeds.size(0)
# Normalized features
q_embeds = F.normalize(q_embeds, dim=-1)
p_embeds = F.normalize(p_embeds, dim=-1)
logit_scale = self.get_logit_scale()
# We gather tensors from all gpus
if self.gather_embeddings:
all_p_embeds = torch.cat(torch.distributed.nn.all_gather(p_embeds), dim=0) # [bs * num_gpus, embed_dim]
if enable_hard_neg:
# Normalize the negative embeddings
n_embeds = F.normalize(n_embeds, dim=-1)
# Number of in-batch positives to add as negatives
in_batch_neg_num = min(bs - 1, self.in_batch_neg_num)
# Augment neg_cand_embeddings with a subset of in-batch positive candidates from other queries
mask = torch.eye(bs).to(n_embeds.device) == 0
in_batch_negs = p_embeds.unsqueeze(1).expand(-1, bs, -1)[mask].reshape(bs, bs - 1, -1)
in_batch_negs = in_batch_negs[:, :in_batch_neg_num, :]
aug_n_embeds = torch.cat([n_embeds, in_batch_negs], dim=1) # [bs, neg_num + in_batch_neg_num, embed_dim]
# Compute similarity scores for positives and negatives
pos_scores = (q_embeds * p_embeds).sum(-1) * logit_scale # [bs]
neg_scores = (q_embeds.unsqueeze(1) * aug_n_embeds).sum(-1) * logit_scale # [bs, neg_num +in_batch_neg_num]
logit_matrix = torch.cat([pos_scores.unsqueeze(-1), neg_scores], 1) # [bs, neg_num + in_batch_neg_num + 1]
# Compute log softmax over the matrix
lsm = F.log_softmax(logit_matrix, dim=1)
            # The NLL loss for the positive candidate
loss = torch.mean(-1.0 * lsm[:, 0])
# Compute accuracy by checking which instances have the positive candidate as the most similar one
_max_score, max_idxs = torch.max(logit_matrix, 1)
accuracy = (max_idxs == 0).sum() / bs
else:
if self.gather_embeddings:
score = torch.matmul(q_embeds, all_p_embeds.t()) * logit_scale # [bs, bs * num_gpus]
gpu_id = torch.distributed.get_rank()
sim_targets = (gpu_id * bs + torch.arange(bs)).to(score.device) # [bs]
else:
score = torch.matmul(q_embeds, p_embeds.t()) * logit_scale # [bs, bs]
sim_targets = torch.arange(bs).to(score.device) # [bs]
# compute loss
loss = self.loss_function(score, sim_targets)
_max_score, max_idxs = torch.max(score, 1)
accuracy = (max_idxs == sim_targets).sum() / bs
outputs = {"loss": loss, "accuracy": accuracy}
return outputs
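    # Illustrative sketch of the batch layout the loss above expects (an assumption inferred from
    # the indexing code, not taken from the original dataloader): queries and candidates are
    # stacked into the same batched tensors, and "index_mapping" points into that stack, e.g.
    #
    #   index_mapping = {
    #       "query": [[0], [1]],           # rows 0-1 of the embeddings are the queries
    #       "pos_cand": [[2], [3]],        # rows 2-3 are their positive candidates
    #       "neg_cand_list": [[4], [5]],   # optional; presence enables the hard-negative branch
    #   }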
def forward(self, batch, encode_mbeir_batch=False):
if encode_mbeir_batch:
return self.encode_mbeir_batch(batch)
return self.compute_inbatch_contrastive_loss(batch)
def encode_mbeir_batch(self, batch):
# Get hashed id_list
id_list = batch.get("did_list") or batch.get("qid_list")
assert id_list is not None, "id_list must be provided."
assert isinstance(id_list[0], int), "id_list must be hashed to int."
# Compute embeddings
embeddings = self.encode_multimodal_input(
batch["txt_batched"],
batch["image_batched"],
batch["txt_mask_batched"],
batch["image_mask_batched"]
)
assert embeddings.size(0) == len(id_list), "embeddings and id_batched must have the same batch size."
return embeddings, id_list
| [] |
2024-01-10 | TIGER-AI-Lab/UniIR | src~models_scripts~uniir_clip~clip_scorefusion~clip_sf.py | """
Score level fusion model using CLIP
Code adapted from OpenAI's CLIP codebase
"""
import torch
from torch import nn
import torch.nn.functional as F
import clip
import torch.distributed.nn
class CLIPScoreFusion(nn.Module):
def __init__(self, model_name="ViT-B/32", device="cuda", jit=False, download_root=None, config=None):
super().__init__()
# Load pre-trained CLIP model
self.clip_model, self.img_preprocess_fn = clip.load(model_name, device, jit, download_root=download_root)
self.tokenizer = clip.tokenize
self.loss_function = nn.CrossEntropyLoss()
if config is not None:
self.gather_embeddings = config.model.gather_embeddings
self.in_batch_neg_num = config.data_config.in_batch_neg_num
def get_img_preprocess_fn(self):
return self.img_preprocess_fn
def get_tokenizer(self):
def tokenizer_wrapper(txt):
tokenizer = self.tokenizer
txt_tensor = tokenizer(txt, context_length=77, truncate=True)
return txt_tensor
return tokenizer_wrapper
def encode_text(self, text_tensor):
return self.clip_model.encode_text(text_tensor)
def encode_image(self, image_tensor):
return self.clip_model.encode_image(image_tensor)
def fuse_embeddings(self, img_emb, txt_emb):
fused_emb = img_emb + txt_emb
return fused_emb
def encode_multimodal_input(self, txt_tensor, img_tensor, txt_mask, img_mask):
"""
:param txt_tensor:
:param img_tensor:
:param txt_mask: expected shape: [batch_size, 1]
:param img_mask: expected shape: [batch_size, 1]
:return:
"""
txt_emb = self.encode_text(txt_tensor) * txt_mask.unsqueeze(-1)
img_emb = self.encode_image(img_tensor) * img_mask.unsqueeze(-1)
return self.fuse_embeddings(txt_emb, img_emb) # shape: [batch_size, embed_dim]
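    # Score-level fusion above is a masked sum of the two CLIP embeddings, so a text-only query
    # simply zeroes out the image term (and vice versa). Minimal sketch (added note, assuming a
    # constructed model `m` and already-batched tensors):
    #
    #   txt_mask = torch.tensor([1.0, 1.0])   # both items have text
    #   img_mask = torch.tensor([1.0, 0.0])   # second item is text-only
    #   fused = m.encode_multimodal_input(txt_tensor, img_tensor, txt_mask, img_mask)
    #   # fused[1] equals the CLIP text embedding of item 1, since its image embedding is zeroed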
def get_logit_scale(self):
return self.clip_model.logit_scale.exp()
def compute_inbatch_contrastive_loss(self, batch):
"""
adapted from the CLIP codebase and UniVL-DR codebase
:param model:
:param batch:
:param loss_function:
:return:
"""
txt_batched = batch["txt_batched"]
image_batched = batch["image_batched"]
txt_mask_batched = batch["txt_mask_batched"]
image_mask_batched = batch["image_mask_batched"]
index_mapping = batch["index_mapping"]
enable_hard_neg = "neg_cand_list" in index_mapping
# Compute embeddings
embeddings = self.encode_multimodal_input(txt_batched, image_batched, txt_mask_batched, image_mask_batched)
# Extract embeddings
q_embeds = embeddings[torch.tensor(index_mapping["query"]).flatten()] # shape: [bs, embed_dim]
p_embeds = embeddings[torch.tensor(index_mapping["pos_cand"]).flatten()] # shape: [bs, embed_dim]
n_embeds = None
if enable_hard_neg:
n_embeds = embeddings[torch.tensor(index_mapping["neg_cand_list"])] # [bs, neg_num, embed_dim]
bs = q_embeds.size(0)
# Normalized features
q_embeds = F.normalize(q_embeds, dim=-1)
p_embeds = F.normalize(p_embeds, dim=-1)
logit_scale = self.get_logit_scale()
# We gather tensors from all gpus
if self.gather_embeddings:
all_p_embeds = torch.cat(torch.distributed.nn.all_gather(p_embeds), dim=0) # [bs * num_gpus, embed_dim]
if enable_hard_neg:
# Normalize the negative embeddings
n_embeds = F.normalize(n_embeds, dim=-1)
# Number of in-batch positives to add as negatives
in_batch_neg_num = min(bs - 1, self.in_batch_neg_num)
# Augment neg_cand_embeddings with a subset of in-batch positive candidates from other queries
mask = torch.eye(bs).to(n_embeds.device) == 0
in_batch_negs = p_embeds.unsqueeze(1).expand(-1, bs, -1)[mask].reshape(bs, bs - 1, -1)
in_batch_negs = in_batch_negs[:, :in_batch_neg_num, :]
aug_n_embeds = torch.cat([n_embeds, in_batch_negs], dim=1) # [bs, neg_num + in_batch_neg_num, embed_dim]
# Compute similarity scores for positives and negatives
pos_scores = (q_embeds * p_embeds).sum(-1) * logit_scale # [bs]
neg_scores = (q_embeds.unsqueeze(1) * aug_n_embeds).sum(-1) * logit_scale # [bs, neg_num +in_batch_neg_num]
logit_matrix = torch.cat([pos_scores.unsqueeze(-1), neg_scores], 1) # [bs, neg_num + in_batch_neg_num + 1]
# Compute log softmax over the matrix
lsm = F.log_softmax(logit_matrix, dim=1)
            # The NLL loss for the positive candidate
loss = torch.mean(-1.0 * lsm[:, 0])
# Compute accuracy by checking which instances have the positive candidate as the most similar one
_max_score, max_idxs = torch.max(logit_matrix, 1)
accuracy = (max_idxs == 0).sum() / bs
else:
if self.gather_embeddings:
score = torch.matmul(q_embeds, all_p_embeds.t()) * logit_scale # [bs, bs * num_gpus]
gpu_id = torch.distributed.get_rank()
sim_targets = (gpu_id * bs + torch.arange(bs)).to(score.device) # [bs]
else:
score = torch.matmul(q_embeds, p_embeds.t()) * logit_scale # [bs, bs]
sim_targets = torch.arange(bs).to(score.device) # [bs]
# compute loss
loss = self.loss_function(score, sim_targets)
_max_score, max_idxs = torch.max(score, 1)
accuracy = (max_idxs == sim_targets).sum() / bs
outputs = {"loss": loss, "accuracy": accuracy}
return outputs
def forward(self, batch, encode_mbeir_batch=False):
if encode_mbeir_batch:
return self.encode_mbeir_batch(batch)
return self.compute_inbatch_contrastive_loss(batch)
def encode_mbeir_batch(self, batch):
# Get hashed id_list
id_list = batch.get("did_list") or batch.get("qid_list")
assert id_list is not None, "id_list must be provided."
assert isinstance(id_list[0], int), "id_list must be hashed to int."
# Compute embeddings
embeddings = self.encode_multimodal_input(
batch["txt_batched"], batch["image_batched"], batch["txt_mask_batched"], batch["image_mask_batched"]
)
assert embeddings.size(0) == len(id_list), "embeddings and id_batched must have the same batch size."
return embeddings, id_list
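    # Illustrative sketch (added note; the id values are assumptions) of the batch expected by
    # encode_mbeir_batch above:
    #
    #   batch = {
    #       "txt_batched": txt_tensor, "image_batched": img_tensor,
    #       "txt_mask_batched": txt_mask, "image_mask_batched": img_mask,
    #       "did_list": [101, 102],   # hashed integer ids; use "qid_list" when encoding queries
    #   }
    #   embeddings, ids = model.encode_mbeir_batch(batch)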
| [] |
2024-01-10 | mastersilvio/silvia | streamlit_app.py | import streamlit as st
from langchain.llms import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
st.set_page_config(page_title="SilvIA", page_icon="images/silvIA.png")
st.image("images/silvIA.png", width=100)
st.title('SilvIA')
st.subheader('A sua assistente para gerar questões utilizando Inteligência Artificial')
disciplines = [
'Língua Portuguesa',
'Matemática',
'Biologia',
'Física',
'Química',
'História',
'Geografia',
'Sociologia',
'Filosofia'
]
grades = [
'6º ano do Ensino Fundamental',
'7º ano do Ensino Fundamental',
'8º ano do Ensino Fundamental',
'9º ano do Ensino Fundamental',
'1ª série do Ensino Médio',
'2ª série do Ensino Médio',
'3º série do Ensino Médio'
]
openai_api_key = os.getenv('OPENAI_API_KEY')
discipline = st.selectbox('Escolha uma disciplina:', disciplines)
grade = st.selectbox('Escolha uma série:', grades)
content = st.text_input('Conteúdo:')
quantity = st.number_input('Quantidade de questões:', min_value=1, max_value=10, value=1)
multiple_choice = st.checkbox('Com questões de múltipla escolha?')
problem_situation = st.checkbox('Com situação problema?')
competition = st.checkbox('Com questões de concurso?')
answer = st.checkbox('Com resposta no final?')
def generate_response(input_text):
llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key, max_tokens=2048)
st.info(llm(input_text))
with st.form('my_form'):
text = 'Crie uma prova para que eu possa estudar e contenha as seguintes características:\n'
text += f'- Disciplina: {discipline}\n'
text += f'- Série: {grade}\n'
text += f'- Conteúdo: {content}\n'
text += f'- Quantidade de questões: {quantity}\n'
text += f'- Questões de múltipla escolha: {multiple_choice}\n'
text += f'- Situação problema: {problem_situation}\n'
    text += f'- Com as respostas/gabarito no final: {answer}\n'
text += f'- Questões de bancas de concurso público: {competition} e a descrição no início do enunciado quando for o caso\n'
submitted = st.form_submit_button('Solicitar Questões')
if submitted:
generate_response(text)
| [] |
2024-01-10 | Boostchat-ai/litellm | litellm~tests~test_router.py | #### What this tests ####
#This tests litellm router
import sys, os, time
import traceback, asyncio
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import Router
from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
from dotenv import load_dotenv
load_dotenv()
def test_exception_raising():
# this tests if the router raises an exception when invalid params are set
# in this test both deployments have bad keys - Keep this test. It validates if the router raises the most recent exception
litellm.set_verbose=True
import openai
try:
print("testing if router raises an exception")
old_api_key = os.environ["AZURE_API_KEY"]
os.environ["AZURE_API_KEY"] = ""
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": "bad-key",
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"tpm": 240000,
"rpm": 1800
},
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { #
"model": "gpt-3.5-turbo",
"api_key": "bad-key",
},
"tpm": 240000,
"rpm": 1800
}
]
router = Router(model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=int(os.getenv("REDIS_PORT")),
routing_strategy="simple-shuffle",
set_verbose=False,
num_retries=1) # type: ignore
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
]
)
os.environ["AZURE_API_KEY"] = old_api_key
pytest.fail(f"Should have raised an Auth Error")
except openai.AuthenticationError:
print("Test Passed: Caught an OPENAI AUTH Error, Good job. This is what we needed!")
os.environ["AZURE_API_KEY"] = old_api_key
router.reset()
except Exception as e:
os.environ["AZURE_API_KEY"] = old_api_key
print("Got unexpected exception on router!", e)
# test_exception_raising()
def test_reading_key_from_model_list():
# [PROD TEST CASE]
# this tests if the router can read key from model list and make completion call, and completion + stream call. This is 90% of the router use case
    # DO NOT REMOVE THIS TEST. It's an IMP ONE. Speak to Ishaan if you are trying to remove this
litellm.set_verbose=False
import openai
try:
print("testing if router raises an exception")
old_api_key = os.environ["AZURE_API_KEY"]
os.environ.pop("AZURE_API_KEY", None)
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": old_api_key,
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"tpm": 240000,
"rpm": 1800
}
]
router = Router(model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=int(os.getenv("REDIS_PORT")),
routing_strategy="simple-shuffle",
set_verbose=True,
num_retries=1) # type: ignore
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
]
)
print("\n response", response)
str_response = response.choices[0].message.content
print("\n str_response", str_response)
assert len(str_response) > 0
print("\n Testing streaming response")
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
],
stream=True
)
completed_response = ""
for chunk in response:
if chunk is not None:
print(chunk)
completed_response += chunk.choices[0].delta.content or ""
print("\n completed_response", completed_response)
assert len(completed_response) > 0
print("\n Passed Streaming")
os.environ["AZURE_API_KEY"] = old_api_key
router.reset()
except Exception as e:
os.environ["AZURE_API_KEY"] = old_api_key
print(f"FAILED TEST")
pytest.fail(f"Got unexpected exception on router! - {e}")
# test_reading_key_from_model_list()
def test_call_one_endpoint():
# [PROD TEST CASE]
# user passes one deployment they want to call on the router, we call the specified one
# this test makes a completion calls azure/chatgpt-v-2, it should work
try:
print("Testing calling a specific deployment")
old_api_key = os.environ["AZURE_API_KEY"]
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": old_api_key,
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"tpm": 240000,
"rpm": 1800
},
{
"model_name": "claude-v1",
"litellm_params": {
"model": "bedrock/anthropic.claude-instant-v1",
},
"tpm": 100000,
"rpm": 10000,
},
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "azure/azure-embedding-model",
"api_key":os.environ['AZURE_API_KEY'],
"api_base": os.environ['AZURE_API_BASE']
},
"tpm": 100000,
"rpm": 10000,
},
]
litellm.set_verbose=True
router = Router(model_list=model_list,
routing_strategy="simple-shuffle",
set_verbose=True,
num_retries=1) # type: ignore
old_api_base = os.environ.pop("AZURE_API_BASE", None)
async def call_azure_completion():
response = await router.acompletion(
model="azure/chatgpt-v-2",
messages=[
{
"role": "user",
"content": "hello this request will pass"
}
],
specific_deployment=True
)
print("\n response", response)
async def call_bedrock_claude():
response = await router.acompletion(
model="bedrock/anthropic.claude-instant-v1",
messages=[
{
"role": "user",
"content": "hello this request will pass"
}
],
specific_deployment=True
)
print("\n response", response)
async def call_azure_embedding():
response = await router.aembedding(
model="azure/azure-embedding-model",
input = ["good morning from litellm"],
specific_deployment=True
)
print("\n response", response)
asyncio.run(call_azure_completion())
asyncio.run(call_bedrock_claude())
asyncio.run(call_azure_embedding())
os.environ["AZURE_API_BASE"] = old_api_base
os.environ["AZURE_API_KEY"] = old_api_key
except Exception as e:
print(f"FAILED TEST")
pytest.fail(f"Got unexpected exception on router! - {e}")
# test_call_one_endpoint()
def test_router_azure_acompletion():
# [PROD TEST CASE]
# This is 90% of the router use case, makes an acompletion call, acompletion + stream call and verifies it got a response
    # DO NOT REMOVE THIS TEST. It's an IMP ONE. Speak to Ishaan if you are trying to remove this
litellm.set_verbose=False
import openai
try:
print("Router Test Azure - Acompletion, Acompletion with stream")
# remove api key from env to repro how proxy passes key to router
old_api_key = os.environ["AZURE_API_KEY"]
os.environ.pop("AZURE_API_KEY", None)
model_list = [
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/chatgpt-v-2",
"api_key": old_api_key,
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": os.getenv("AZURE_API_BASE")
},
"rpm": 1800
},
{
"model_name": "gpt-3.5-turbo", # openai model name
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/gpt-turbo",
"api_key": os.getenv("AZURE_FRANCE_API_KEY"),
"api_version": os.getenv("AZURE_API_VERSION"),
"api_base": "https://openai-france-1234.openai.azure.com"
},
"rpm": 1800
}
]
router = Router(model_list=model_list,
routing_strategy="simple-shuffle",
set_verbose=True
) # type: ignore
async def test1():
response = await router.acompletion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will pass"
}
]
)
str_response = response.choices[0].message.content
print("\n str_response", str_response)
assert len(str_response) > 0
print("\n response", response)
asyncio.run(test1())
print("\n Testing streaming response")
async def test2():
response = await router.acompletion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello this request will fail"
}
],
stream=True
)
completed_response = ""
async for chunk in response:
if chunk is not None:
print(chunk)
completed_response += chunk.choices[0].delta.content or ""
print("\n completed_response", completed_response)
assert len(completed_response) > 0
asyncio.run(test2())
print("\n Passed Streaming")
os.environ["AZURE_API_KEY"] = old_api_key
router.reset()
except Exception as e:
os.environ["AZURE_API_KEY"] = old_api_key
print(f"FAILED TEST")
pytest.fail(f"Got unexpected exception on router! - {e}")
# test_router_azure_acompletion()
### FUNCTION CALLING
def test_function_calling():
model_list = [
{
"model_name": "gpt-3.5-turbo-0613",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
"rpm": 10000,
},
]
messages = [
{"role": "user", "content": "What is the weather like in Boston?"}
]
functions = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
]
router = Router(model_list=model_list)
response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions)
router.reset()
print(response)
def test_acompletion_on_router():
# tests acompletion + caching on router
try:
litellm.set_verbose = True
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
"tpm": 100000,
"rpm": 10000,
},
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "azure/chatgpt-v-2",
"api_key": os.getenv("AZURE_API_KEY"),
"api_base": os.getenv("AZURE_API_BASE"),
"api_version": os.getenv("AZURE_API_VERSION")
},
"tpm": 100000,
"rpm": 10000,
}
]
messages = [
{"role": "user", "content": f"write a one sentence poem {time.time()}?"}
]
start_time = time.time()
router = Router(model_list=model_list,
redis_host=os.environ["REDIS_HOST"],
redis_password=os.environ["REDIS_PASSWORD"],
redis_port=os.environ["REDIS_PORT"],
cache_responses=True,
timeout=30,
routing_strategy="simple-shuffle")
async def get_response():
print("Testing acompletion + caching on router")
response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1)
print(f"response1: {response1}")
response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages, temperature=1)
print(f"response2: {response2}")
assert response1.id == response2.id
assert len(response1.choices[0].message.content) > 0
assert response1.choices[0].message.content == response2.choices[0].message.content
asyncio.run(get_response())
router.reset()
except litellm.Timeout as e:
end_time = time.time()
print(f"timeout error occurred: {end_time - start_time}")
pass
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
test_acompletion_on_router()
def test_function_calling_on_router():
try:
litellm.set_verbose = True
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo-0613",
"api_key": os.getenv("OPENAI_API_KEY"),
},
},
]
function1 = [
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
]
router = Router(
model_list=model_list,
redis_host=os.getenv("REDIS_HOST"),
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=os.getenv("REDIS_PORT")
)
messages=[
{
"role": "user",
"content": "what's the weather in boston"
}
]
response = router.completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
print(f"final returned response: {response}")
router.reset()
assert isinstance(response["choices"][0]["message"]["function_call"], dict)
except Exception as e:
print(f"An exception occurred: {e}")
# test_function_calling_on_router()
def test_aembedding_on_router():
litellm.set_verbose = True
try:
model_list = [
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "text-embedding-ada-002",
},
"tpm": 100000,
"rpm": 10000,
},
]
router = Router(model_list=model_list)
async def embedding_call():
response = await router.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm", "this is another item"],
)
print(response)
asyncio.run(embedding_call())
print("\n Making sync Embedding call\n")
response = router.embedding(
model="text-embedding-ada-002",
input=["good morning from litellm 2"],
)
router.reset()
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_aembedding_on_router()
def test_azure_embedding_on_router():
"""
[PROD Use Case] - Makes an aembedding call + embedding call
"""
litellm.set_verbose = True
try:
model_list = [
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "azure/azure-embedding-model",
"api_key":os.environ['AZURE_API_KEY'],
"api_base": os.environ['AZURE_API_BASE']
},
"tpm": 100000,
"rpm": 10000,
},
]
router = Router(model_list=model_list)
async def embedding_call():
response = await router.aembedding(
model="text-embedding-ada-002",
input=["good morning from litellm"]
)
print(response)
asyncio.run(embedding_call())
print("\n Making sync Azure Embedding call\n")
response = router.embedding(
model="text-embedding-ada-002",
input=["test 2 from litellm. async embedding"]
)
print(response)
router.reset()
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_azure_embedding_on_router()
def test_bedrock_on_router():
litellm.set_verbose = True
print("\n Testing bedrock on router\n")
try:
model_list = [
{
"model_name": "claude-v1",
"litellm_params": {
"model": "bedrock/anthropic.claude-instant-v1",
},
"tpm": 100000,
"rpm": 10000,
},
]
async def test():
router = Router(model_list=model_list)
response = await router.acompletion(
model="claude-v1",
messages=[
{
"role": "user",
"content": "hello from litellm test",
}
]
)
print(response)
router.reset()
asyncio.run(test())
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_bedrock_on_router()
def test_openai_completion_on_router():
# [PROD Use Case] - Makes an acompletion call + async acompletion call, and sync acompletion call, sync completion + stream
# 4 LLM API calls made here. If it fails, add retries. Do not remove this test.
litellm.set_verbose = True
print("\n Testing OpenAI on router\n")
try:
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo",
},
},
]
router = Router(model_list=model_list)
async def test():
response = await router.acompletion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello from litellm test",
}
]
)
print(response)
assert len(response.choices[0].message.content) > 0
print("\n streaming + acompletion test")
response = await router.acompletion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": f"hello from litellm test {time.time()}",
}
],
stream=True
)
complete_response = ""
print(response)
# if you want to see all the attributes and methods
async for chunk in response:
print(chunk)
complete_response += chunk.choices[0].delta.content or ""
print("\n complete response: ", complete_response)
assert len(complete_response) > 0
asyncio.run(test())
print("\n Testing Sync completion calls \n")
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello from litellm test2",
}
]
)
print(response)
assert len(response.choices[0].message.content) > 0
print("\n streaming + completion test")
response = router.completion(
model="gpt-3.5-turbo",
messages=[
{
"role": "user",
"content": "hello from litellm test3",
}
],
stream=True
)
complete_response = ""
print(response)
for chunk in response:
print(chunk)
complete_response += chunk.choices[0].delta.content or ""
print("\n complete response: ", complete_response)
assert len(complete_response) > 0
router.reset()
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_openai_completion_on_router()
def test_reading_keys_os_environ():
import openai
try:
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo",
"api_key": "os.environ/AZURE_API_KEY",
"api_base": "os.environ/AZURE_API_BASE",
"api_version": "os.environ/AZURE_API_VERSION",
"timeout": "os.environ/AZURE_TIMEOUT",
"stream_timeout": "os.environ/AZURE_STREAM_TIMEOUT",
"max_retries": "os.environ/AZURE_MAX_RETRIES",
},
},
]
router = Router(model_list=model_list)
for model in router.model_list:
assert model["litellm_params"]["api_key"] == os.environ["AZURE_API_KEY"], f"{model['litellm_params']['api_key']} vs {os.environ['AZURE_API_KEY']}"
assert model["litellm_params"]["api_base"] == os.environ["AZURE_API_BASE"], f"{model['litellm_params']['api_base']} vs {os.environ['AZURE_API_BASE']}"
assert model["litellm_params"]["api_version"] == os.environ["AZURE_API_VERSION"], f"{model['litellm_params']['api_version']} vs {os.environ['AZURE_API_VERSION']}"
assert float(model["litellm_params"]["timeout"]) == float(os.environ["AZURE_TIMEOUT"]), f"{model['litellm_params']['timeout']} vs {os.environ['AZURE_TIMEOUT']}"
assert float(model["litellm_params"]["stream_timeout"]) == float(os.environ["AZURE_STREAM_TIMEOUT"]), f"{model['litellm_params']['stream_timeout']} vs {os.environ['AZURE_STREAM_TIMEOUT']}"
assert int(model["litellm_params"]["max_retries"]) == int(os.environ["AZURE_MAX_RETRIES"]), f"{model['litellm_params']['max_retries']} vs {os.environ['AZURE_MAX_RETRIES']}"
print("passed testing of reading keys from os.environ")
async_client: openai.AsyncAzureOpenAI = model["async_client"] # type: ignore
assert async_client.api_key == os.environ["AZURE_API_KEY"]
assert async_client.base_url == os.environ["AZURE_API_BASE"]
assert async_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert async_client.timeout == (os.environ["AZURE_TIMEOUT"]), f"{async_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("async client set correctly!")
print("\n Testing async streaming client")
stream_async_client: openai.AsyncAzureOpenAI = model["stream_async_client"] # type: ignore
assert stream_async_client.api_key == os.environ["AZURE_API_KEY"]
assert stream_async_client.base_url == os.environ["AZURE_API_BASE"]
assert stream_async_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{stream_async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert stream_async_client.timeout == (os.environ["AZURE_STREAM_TIMEOUT"]), f"{stream_async_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("async stream client set correctly!")
print("\n Testing sync client")
client: openai.AzureOpenAI = model["client"] # type: ignore
assert client.api_key == os.environ["AZURE_API_KEY"]
assert client.base_url == os.environ["AZURE_API_BASE"]
assert client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert client.timeout == (os.environ["AZURE_TIMEOUT"]), f"{client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("sync client set correctly!")
print("\n Testing sync stream client")
stream_client: openai.AzureOpenAI = model["stream_client"] # type: ignore
assert stream_client.api_key == os.environ["AZURE_API_KEY"]
assert stream_client.base_url == os.environ["AZURE_API_BASE"]
assert stream_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{stream_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert stream_client.timeout == (os.environ["AZURE_STREAM_TIMEOUT"]), f"{stream_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("sync stream client set correctly!")
router.reset()
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_reading_keys_os_environ()
def test_reading_openai_keys_os_environ():
import openai
try:
model_list = [
{
"model_name": "gpt-3.5-turbo",
"litellm_params": {
"model": "gpt-3.5-turbo",
"api_key": "os.environ/OPENAI_API_KEY",
"timeout": "os.environ/AZURE_TIMEOUT",
"stream_timeout": "os.environ/AZURE_STREAM_TIMEOUT",
"max_retries": "os.environ/AZURE_MAX_RETRIES",
},
},
{
"model_name": "text-embedding-ada-002",
"litellm_params": {
"model": "text-embedding-ada-002",
"api_key": "os.environ/OPENAI_API_KEY",
"timeout": "os.environ/AZURE_TIMEOUT",
"stream_timeout": "os.environ/AZURE_STREAM_TIMEOUT",
"max_retries": "os.environ/AZURE_MAX_RETRIES",
},
},
]
router = Router(model_list=model_list)
for model in router.model_list:
assert model["litellm_params"]["api_key"] == os.environ["OPENAI_API_KEY"], f"{model['litellm_params']['api_key']} vs {os.environ['AZURE_API_KEY']}"
assert float(model["litellm_params"]["timeout"]) == float(os.environ["AZURE_TIMEOUT"]), f"{model['litellm_params']['timeout']} vs {os.environ['AZURE_TIMEOUT']}"
assert float(model["litellm_params"]["stream_timeout"]) == float(os.environ["AZURE_STREAM_TIMEOUT"]), f"{model['litellm_params']['stream_timeout']} vs {os.environ['AZURE_STREAM_TIMEOUT']}"
assert int(model["litellm_params"]["max_retries"]) == int(os.environ["AZURE_MAX_RETRIES"]), f"{model['litellm_params']['max_retries']} vs {os.environ['AZURE_MAX_RETRIES']}"
print("passed testing of reading keys from os.environ")
async_client: openai.AsyncOpenAI = model["async_client"] # type: ignore
assert async_client.api_key == os.environ["OPENAI_API_KEY"]
assert async_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert async_client.timeout == (os.environ["AZURE_TIMEOUT"]), f"{async_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("async client set correctly!")
print("\n Testing async streaming client")
stream_async_client: openai.AsyncOpenAI = model["stream_async_client"] # type: ignore
assert stream_async_client.api_key == os.environ["OPENAI_API_KEY"]
assert stream_async_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{stream_async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert stream_async_client.timeout == (os.environ["AZURE_STREAM_TIMEOUT"]), f"{stream_async_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("async stream client set correctly!")
print("\n Testing sync client")
client: openai.AzureOpenAI = model["client"] # type: ignore
assert client.api_key == os.environ["OPENAI_API_KEY"]
assert client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert client.timeout == (os.environ["AZURE_TIMEOUT"]), f"{client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("sync client set correctly!")
print("\n Testing sync stream client")
stream_client: openai.AzureOpenAI = model["stream_client"] # type: ignore
assert stream_client.api_key == os.environ["OPENAI_API_KEY"]
assert stream_client.max_retries == (os.environ["AZURE_MAX_RETRIES"]), f"{stream_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
assert stream_client.timeout == (os.environ["AZURE_STREAM_TIMEOUT"]), f"{stream_client.timeout} vs {os.environ['AZURE_TIMEOUT']}"
print("sync stream client set correctly!")
router.reset()
except Exception as e:
traceback.print_exc()
pytest.fail(f"Error occurred: {e}")
# test_reading_openai_keys_os_environ() | [
"hello this request will pass",
"hello from litellm test",
"what's the weather in boston",
"What is the weather like in Boston?",
"hello this request will fail",
"hello from litellm test2",
"hello from litellm test3"
] |
2024-01-10 | Boostchat-ai/litellm | litellm~router.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you ! We ❤️ you! - Krrish & Ishaan
import copy
from datetime import datetime
from typing import Dict, List, Optional, Union, Literal, Any
import random, threading, time, traceback, uuid
import litellm, openai
from litellm.caching import RedisCache, InMemoryCache, DualCache
import logging, asyncio
import inspect, concurrent
from openai import AsyncOpenAI
from collections import defaultdict
from litellm.router_strategy.least_busy import LeastBusyLoggingHandler
import copy
class Router:
"""
Example usage:
```python
from litellm import Router
model_list = [
{
"model_name": "azure-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/<your-deployment-name-1>",
"api_key": <your-api-key>,
"api_version": <your-api-version>,
"api_base": <your-api-base>
},
},
{
"model_name": "azure-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "azure/<your-deployment-name-2>",
"api_key": <your-api-key>,
"api_version": <your-api-version>,
"api_base": <your-api-base>
},
},
{
"model_name": "openai-gpt-3.5-turbo", # model alias
"litellm_params": { # params for litellm completion/embedding call
"model": "gpt-3.5-turbo",
"api_key": <your-api-key>,
},
    },
    ]
router = Router(model_list=model_list, fallbacks=[{"azure-gpt-3.5-turbo": "openai-gpt-3.5-turbo"}])
```
"""
model_names: List = []
cache_responses: Optional[bool] = False
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
num_retries: int = 0
tenacity = None
leastbusy_logger: Optional[LeastBusyLoggingHandler] = None
def __init__(self,
model_list: Optional[list] = None,
## CACHING ##
redis_url: Optional[str] = None,
redis_host: Optional[str] = None,
redis_port: Optional[int] = None,
redis_password: Optional[str] = None,
cache_responses: Optional[bool] = False,
cache_kwargs: dict = {}, # additional kwargs to pass to RedisCache (see caching.py)
## RELIABILITY ##
num_retries: int = 0,
timeout: Optional[float] = None,
default_litellm_params = {}, # default params for Router.chat.completion.create
set_verbose: bool = False,
fallbacks: List = [],
allowed_fails: Optional[int] = None,
context_window_fallbacks: List = [],
routing_strategy: Literal["simple-shuffle", "least-busy", "usage-based-routing", "latency-based-routing"] = "simple-shuffle") -> None:
self.set_verbose = set_verbose
self.deployment_names: List = [] # names of models under litellm_params. ex. azure/chatgpt-v-2
if model_list:
model_list = copy.deepcopy(model_list)
self.set_model_list(model_list)
self.healthy_deployments: List = self.model_list
self.deployment_latency_map = {}
for m in model_list:
self.deployment_latency_map[m["litellm_params"]["model"]] = 0
self.allowed_fails = allowed_fails or litellm.allowed_fails
self.failed_calls = InMemoryCache() # cache to track failed call per deployment, if num failed calls within 1 minute > allowed fails, then add it to cooldown
self.num_retries = num_retries or litellm.num_retries or 0
self.timeout = timeout or litellm.request_timeout
self.routing_strategy = routing_strategy
self.fallbacks = fallbacks or litellm.fallbacks
self.context_window_fallbacks = context_window_fallbacks or litellm.context_window_fallbacks
self.model_exception_map: dict = {} # dict to store model: list exceptions. self.exceptions = {"gpt-3.5": ["API KEY Error", "Rate Limit Error", "good morning error"]}
self.total_calls: defaultdict = defaultdict(int) # dict to store total calls made to each model
self.fail_calls: defaultdict = defaultdict(int) # dict to store fail_calls made to each model
self.success_calls: defaultdict = defaultdict(int) # dict to store success_calls made to each model
self.previous_models: List = [] # list to store failed calls (passed in as metadata to next call)
# make Router.chat.completions.create compatible for openai.chat.completions.create
self.chat = litellm.Chat(params=default_litellm_params)
# default litellm args
self.default_litellm_params = default_litellm_params
self.default_litellm_params.setdefault("timeout", timeout)
self.default_litellm_params.setdefault("max_retries", 0)
### CACHING ###
cache_type = "local" # default to an in-memory cache
redis_cache = None
cache_config = {}
if redis_url is not None or (redis_host is not None and redis_port is not None and redis_password is not None):
cache_type = "redis"
if redis_url is not None:
cache_config['url'] = redis_url
if redis_host is not None:
cache_config['host'] = redis_host
if redis_port is not None:
cache_config['port'] = str(redis_port) # type: ignore
if redis_password is not None:
cache_config['password'] = redis_password
# Add additional key-value pairs from cache_kwargs
cache_config.update(cache_kwargs)
redis_cache = RedisCache(**cache_config)
if cache_responses:
if litellm.cache is None:
# the cache can be initialized on the proxy server. We should not overwrite it
litellm.cache = litellm.Cache(type=cache_type, **cache_config)
self.cache_responses = cache_responses
self.cache = DualCache(redis_cache=redis_cache, in_memory_cache=InMemoryCache()) # use a dual cache (Redis+In-Memory) for tracking cooldowns, usage, etc.
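        # Illustrative sketch (added note, not part of the original code) of how callers wire up
        # the caching configured above; the env-var names are assumptions taken from the tests:
        #
        #   router = Router(
        #       model_list=model_list,
        #       redis_host=os.getenv("REDIS_HOST"),
        #       redis_password=os.getenv("REDIS_PASSWORD"),
        #       redis_port=int(os.getenv("REDIS_PORT")),
        #       cache_responses=True,
        #   )
        #
        # This enables Redis-backed response caching via litellm.cache and reuses the same Redis
        # connection inside the DualCache for cooldown and usage tracking.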
### ROUTING SETUP ###
if routing_strategy == "least-busy":
self.leastbusy_logger = LeastBusyLoggingHandler(router_cache=self.cache)
## add callback
if isinstance(litellm.input_callback, list):
litellm.input_callback.append(self.leastbusy_logger) # type: ignore
else:
litellm.input_callback = [self.leastbusy_logger] # type: ignore
if isinstance(litellm.callbacks, list):
litellm.callbacks.append(self.leastbusy_logger) # type: ignore
## USAGE TRACKING ##
if isinstance(litellm.success_callback, list):
litellm.success_callback.append(self.deployment_callback)
else:
litellm.success_callback = [self.deployment_callback]
if isinstance(litellm.failure_callback, list):
litellm.failure_callback.append(self.deployment_callback_on_failure)
else:
litellm.failure_callback = [self.deployment_callback_on_failure]
self.print_verbose(f"Intialized router with Routing strategy: {self.routing_strategy}\n")
### COMPLETION + EMBEDDING FUNCTIONS
def completion(self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
"""
Example usage:
        response = router.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}])
"""
try:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_function"] = self._completion
timeout = kwargs.get("request_timeout", self.timeout)
kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries)
kwargs.setdefault("metadata", {}).update({"model_group": model})
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
# Submit the function to the executor with a timeout
future = executor.submit(self.function_with_fallbacks, **kwargs)
response = future.result(timeout=timeout) # type: ignore
return response
except Exception as e:
raise e
def _completion(
self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages, specific_deployment=kwargs.pop("specific_deployment", None))
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
kwargs["model_info"] = deployment.get("model_info", {})
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
self.print_verbose(f"completion model: {original_model_string}")
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = self._get_client(deployment=deployment, kwargs=kwargs)
return litellm.completion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs})
except Exception as e:
raise e
async def acompletion(self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_function"] = self._acompletion
kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries)
timeout = kwargs.get("request_timeout", self.timeout)
kwargs.setdefault("metadata", {}).update({"model_group": model})
# response = await asyncio.wait_for(self.async_function_with_fallbacks(**kwargs), timeout=timeout)
response = await self.async_function_with_fallbacks(**kwargs)
return response
except Exception as e:
raise e
async def _acompletion(
self,
model: str,
messages: List[Dict[str, str]],
**kwargs):
try:
self.print_verbose(f"Inside _acompletion()- model: {model}; kwargs: {kwargs}")
original_model_string = None # set a default for this variable
deployment = self.get_available_deployment(model=model, messages=messages, specific_deployment=kwargs.pop("specific_deployment", None))
kwargs.setdefault("metadata", {}).update({"deployment": deployment["litellm_params"]["model"]})
kwargs["model_info"] = deployment.get("model_info", {})
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = self._get_client(deployment=deployment, kwargs=kwargs, client_type="async")
self.total_calls[original_model_string] +=1
response = await litellm.acompletion(**{**data, "messages": messages, "caching": self.cache_responses, "client": model_client, **kwargs})
self.success_calls[original_model_string] +=1
return response
except Exception as e:
if original_model_string is not None:
self.fail_calls[original_model_string] +=1
raise e
def text_completion(self,
model: str,
prompt: str,
is_retry: Optional[bool] = False,
is_fallback: Optional[bool] = False,
is_async: Optional[bool] = False,
**kwargs):
try:
kwargs.setdefault("metadata", {}).update({"model_group": model})
messages=[{"role": "user", "content": prompt}]
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, messages=messages, specific_deployment=kwargs.pop("specific_deployment", None))
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
# call via litellm.completion()
return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore
except Exception as e:
if self.num_retries > 0:
kwargs["model"] = model
kwargs["messages"] = messages
kwargs["original_exception"] = e
kwargs["original_function"] = self.completion
return self.function_with_retries(**kwargs)
else:
raise e
def embedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = False,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input, specific_deployment=kwargs.pop("specific_deployment", None))
kwargs.setdefault("model_info", {})
kwargs.setdefault("metadata", {}).update({"model_group": model, "deployment": deployment["litellm_params"]["model"]}) # [TODO]: move to using async_function_with_fallbacks
data = deployment["litellm_params"].copy()
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = self._get_client(deployment=deployment, kwargs=kwargs)
# call via litellm.embedding()
return litellm.embedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs})
async def aembedding(self,
model: str,
input: Union[str, List],
is_async: Optional[bool] = True,
**kwargs) -> Union[List[float], None]:
# pick the one that is available (lowest TPM/RPM)
deployment = self.get_available_deployment(model=model, input=input, specific_deployment=kwargs.pop("specific_deployment", None))
kwargs.setdefault("metadata", {}).update({"model_group": model, "deployment": deployment["litellm_params"]["model"]})
data = deployment["litellm_params"].copy()
kwargs["model_info"] = deployment.get("model_info", {})
for k, v in self.default_litellm_params.items():
if k not in data: # prioritize model-specific params > default router params
data[k] = v
########## remove -ModelID-XXXX from model ##############
original_model_string = data["model"]
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
data["model"] = original_model_string[:index_of_model_id]
else:
data["model"] = original_model_string
model_client = self._get_client(deployment=deployment, kwargs=kwargs, client_type="async")
return await litellm.aembedding(**{**data, "input": input, "caching": self.cache_responses, "client": model_client, **kwargs})
async def async_function_with_fallbacks(self, *args, **kwargs):
"""
Try calling the function_with_retries
If it fails after num_retries, fall back to another model group
"""
model_group = kwargs.get("model")
fallbacks = kwargs.get("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.get("context_window_fallbacks", self.context_window_fallbacks)
try:
response = await self.async_function_with_retries(*args, **kwargs)
self.print_verbose(f'Async Response: {response}')
return response
except Exception as e:
self.print_verbose(f"An exception occurs: {e}")
original_exception = e
try:
self.print_verbose(f"Trying to fallback b/w models")
if isinstance(e, litellm.ContextWindowExceededError) and context_window_fallbacks is not None:
fallback_model_group = None
for item in context_window_fallbacks: # [{"gpt-3.5-turbo": ["gpt-4"]}]
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
kwargs["model"] = mg
response = await self.async_function_with_retries(*args, **kwargs)
return response
except Exception as e:
pass
elif fallbacks is not None:
self.print_verbose(f"inside model fallbacks: {fallbacks}")
for item in fallbacks:
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
## LOGGING
kwargs = self.log_retry(kwargs=kwargs, e=original_exception)
kwargs["model"] = mg
kwargs["metadata"]["model_group"] = mg
response = await self.async_function_with_retries(*args, **kwargs)
return response
except Exception as e:
raise e
except Exception as e:
self.print_verbose(f"An exception occurred - {str(e)}")
traceback.print_exc()
raise original_exception
async def async_function_with_retries(self, *args, **kwargs):
self.print_verbose(f"Inside async function with retries: args - {args}; kwargs - {kwargs}")
backoff_factor = 1
original_function = kwargs.pop("original_function")
fallbacks = kwargs.pop("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.pop("context_window_fallbacks", self.context_window_fallbacks)
self.print_verbose(f"async function w/ retries: original_function - {original_function}")
num_retries = kwargs.pop("num_retries")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = await original_function(*args, **kwargs)
return response
except Exception as e:
original_exception = e
### CHECK IF RATE LIMIT / CONTEXT WINDOW ERROR w/ fallbacks available
if ((isinstance(original_exception, litellm.ContextWindowExceededError) and context_window_fallbacks is None)
or (isinstance(original_exception, openai.RateLimitError) and fallbacks is not None)):
raise original_exception
### RETRY
#### check if it should retry + back-off if required
if "No models available" in str(e):
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries)
await asyncio.sleep(timeout)
elif hasattr(original_exception, "status_code") and hasattr(original_exception, "response") and litellm._should_retry(status_code=original_exception.status_code):
if hasattr(original_exception.response, "headers"):
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries, response_headers=original_exception.response.headers)
else:
timeout = litellm._calculate_retry_after(remaining_retries=num_retries, max_retries=num_retries)
await asyncio.sleep(timeout)
else:
raise original_exception
## LOGGING
if num_retries > 0:
kwargs = self.log_retry(kwargs=kwargs, e=original_exception)
for current_attempt in range(num_retries):
self.print_verbose(f"retrying request. Current attempt - {current_attempt}; num retries: {num_retries}")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = await original_function(*args, **kwargs)
if inspect.iscoroutinefunction(response): # async errors are often returned as coroutines
response = await response
return response
except Exception as e:
## LOGGING
kwargs = self.log_retry(kwargs=kwargs, e=e)
remaining_retries = num_retries - current_attempt
if "No models available" in str(e):
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries, min_timeout=1)
await asyncio.sleep(timeout)
elif hasattr(e, "status_code") and hasattr(e, "response") and litellm._should_retry(status_code=e.status_code):
if hasattr(e.response, "headers"):
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries, response_headers=e.response.headers)
else:
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries)
await asyncio.sleep(timeout)
else:
raise e
raise original_exception
def function_with_fallbacks(self, *args, **kwargs):
"""
Try calling the function_with_retries
If it fails after num_retries, fall back to another model group
"""
model_group = kwargs.get("model")
fallbacks = kwargs.get("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.get("context_window_fallbacks", self.context_window_fallbacks)
try:
response = self.function_with_retries(*args, **kwargs)
return response
except Exception as e:
original_exception = e
self.print_verbose(f"An exception occurs {original_exception}")
try:
self.print_verbose(f"Trying to fallback b/w models. Initial model group: {model_group}")
if isinstance(e, litellm.ContextWindowExceededError) and context_window_fallbacks is not None:
fallback_model_group = None
for item in context_window_fallbacks: # [{"gpt-3.5-turbo": ["gpt-4"]}]
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
## LOGGING
kwargs = self.log_retry(kwargs=kwargs, e=original_exception)
kwargs["model"] = mg
response = self.function_with_fallbacks(*args, **kwargs)
return response
except Exception as e:
pass
elif fallbacks is not None:
self.print_verbose(f"inside model fallbacks: {fallbacks}")
fallback_model_group = None
for item in fallbacks:
if list(item.keys())[0] == model_group:
fallback_model_group = item[model_group]
break
if fallback_model_group is None:
raise original_exception
for mg in fallback_model_group:
"""
Iterate through the model groups and try calling that deployment
"""
try:
## LOGGING
kwargs = self.log_retry(kwargs=kwargs, e=original_exception)
kwargs["model"] = mg
response = self.function_with_fallbacks(*args, **kwargs)
return response
except Exception as e:
raise e
except Exception as e:
raise e
raise original_exception
def function_with_retries(self, *args, **kwargs):
"""
        Try calling the model up to num_retries times, shuffling between available deployments.
"""
self.print_verbose(f"Inside function with retries: args - {args}; kwargs - {kwargs}")
original_function = kwargs.pop("original_function")
num_retries = kwargs.pop("num_retries")
fallbacks = kwargs.pop("fallbacks", self.fallbacks)
context_window_fallbacks = kwargs.pop("context_window_fallbacks", self.context_window_fallbacks)
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = original_function(*args, **kwargs)
return response
except Exception as e:
original_exception = e
self.print_verbose(f"num retries in function with retries: {num_retries}")
### CHECK IF RATE LIMIT / CONTEXT WINDOW ERROR
if ((isinstance(original_exception, litellm.ContextWindowExceededError) and context_window_fallbacks is None)
or (isinstance(original_exception, openai.RateLimitError) and fallbacks is not None)):
raise original_exception
## LOGGING
if num_retries > 0:
kwargs = self.log_retry(kwargs=kwargs, e=original_exception)
### RETRY
for current_attempt in range(num_retries):
self.print_verbose(f"retrying request. Current attempt - {current_attempt}; retries left: {num_retries}")
try:
# if the function call is successful, no exception will be raised and we'll break out of the loop
response = original_function(*args, **kwargs)
return response
except Exception as e:
## LOGGING
kwargs = self.log_retry(kwargs=kwargs, e=e)
remaining_retries = num_retries - current_attempt
if "No models available" in str(e):
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries, min_timeout=1)
time.sleep(timeout)
elif hasattr(e, "status_code") and hasattr(e, "response") and litellm._should_retry(status_code=e.status_code):
if hasattr(e.response, "headers"):
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries, response_headers=e.response.headers)
else:
timeout = litellm._calculate_retry_after(remaining_retries=remaining_retries, max_retries=num_retries)
time.sleep(timeout)
else:
raise e
raise original_exception
### HELPER FUNCTIONS
def deployment_callback(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
"""
Function LiteLLM submits a callback to after a successful
completion. Purpose of this is to update TPM/RPM usage per model
"""
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
if kwargs["stream"] is True:
if kwargs.get("complete_streaming_response"):
total_tokens = kwargs.get("complete_streaming_response")['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
else:
total_tokens = completion_response['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
self.deployment_latency_map[model_name] = (end_time - start_time).total_seconds()
def deployment_callback_on_failure(
self,
kwargs, # kwargs to completion
completion_response, # response from completion
start_time, end_time # start/end time
):
try:
exception = kwargs.get("exception", None)
exception_type = type(exception)
exception_status = getattr(exception, 'status_code', "")
exception_cause = getattr(exception, '__cause__', "")
exception_message = getattr(exception, 'message', "")
exception_str = str(exception_type) + " Status: " + str(exception_status) + " Message: " + str(exception_cause) + " " + str(exception_message) + " Full exception: " + str(exception)
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
metadata = kwargs.get("litellm_params", {}).get('metadata', None)
if metadata:
deployment = metadata.get("deployment", None)
self._set_cooldown_deployments(deployment)
deployment_exceptions = self.model_exception_map.get(deployment, [])
deployment_exceptions.append(exception_str)
self.model_exception_map[deployment] = deployment_exceptions
self.print_verbose("\nEXCEPTION FOR DEPLOYMENTS\n")
self.print_verbose(self.model_exception_map)
for model in self.model_exception_map:
self.print_verbose(f"Model {model} had {len(self.model_exception_map[model])} exception")
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
except Exception as e:
raise e
def log_retry(self, kwargs: dict, e: Exception) -> dict:
"""
When a retry or fallback happens, log the details of the just failed model call - similar to Sentry breadcrumbing
"""
try:
# Log failed model as the previous model
previous_model = {"exception_type": type(e).__name__, "exception_string": str(e)}
for k, v in kwargs.items(): # log everything in kwargs except the old previous_models value - prevent nesting
if k != "metadata":
previous_model[k] = v
elif k == "metadata" and isinstance(v, dict):
previous_model["metadata"] = {} # type: ignore
for metadata_k, metadata_v in kwargs['metadata'].items():
if metadata_k != "previous_models":
previous_model[k][metadata_k] = metadata_v # type: ignore
self.previous_models.append(previous_model)
kwargs["metadata"]["previous_models"] = self.previous_models
return kwargs
except Exception as e:
raise e
def _set_cooldown_deployments(self,
deployment: str):
"""
Add a model to the list of models being cooled down for that minute, if it exceeds the allowed fails / minute
"""
current_minute = datetime.now().strftime("%H-%M")
# get current fails for deployment
# update the number of failed calls
# if it's > allowed fails
# cooldown deployment
current_fails = self.failed_calls.get_cache(key=deployment) or 0
updated_fails = current_fails + 1
self.print_verbose(f"Attempting to add {deployment} to cooldown list. updated_fails: {updated_fails}; self.allowed_fails: {self.allowed_fails}")
if updated_fails > self.allowed_fails:
# get the current cooldown list for that minute
cooldown_key = f"{current_minute}:cooldown_models" # group cooldown models by minute to reduce number of redis calls
cached_value = self.cache.get_cache(key=cooldown_key)
self.print_verbose(f"adding {deployment} to cooldown models")
# update value
try:
if deployment in cached_value:
pass
else:
cached_value = cached_value + [deployment]
# save updated value
self.cache.set_cache(value=cached_value, key=cooldown_key, ttl=1)
except:
cached_value = [deployment]
# save updated value
self.cache.set_cache(value=cached_value, key=cooldown_key, ttl=1)
else:
self.failed_calls.set_cache(key=deployment, value=updated_fails, ttl=1)
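# --- Illustrative sketch (values are placeholders): cooldown bookkeeping is bucketed per
# wall-clock minute. For a failure at 14:32 on deployment "azure/chat-gpt-v-2":
#
#   failed-calls cache key : "azure/chat-gpt-v-2"        -> incremented fail counter
#   cooldown cache key     : "14-32:cooldown_models"     -> ["azure/chat-gpt-v-2", ...]
#
# Only once the counter exceeds self.allowed_fails is the deployment written to the
# cooldown list; both cache entries are set with ttl=1, so the cooldown is short-lived.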
def _get_cooldown_deployments(self):
"""
Get the list of models being cooled down for this minute
"""
current_minute = datetime.now().strftime("%H-%M")
# get the current cooldown list for that minute
cooldown_key = f"{current_minute}:cooldown_models"
# ----------------------
# Return cooldown models
# ----------------------
cooldown_models = self.cache.get_cache(key=cooldown_key) or []
self.print_verbose(f"retrieve cooldown models: {cooldown_models}")
return cooldown_models
def get_usage_based_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None):
"""
Returns a deployment with the lowest TPM/RPM usage.
"""
# get list of potential deployments
potential_deployments = []
for item in self.model_list:
if item["model_name"] == model:
potential_deployments.append(item)
# get current call usage
token_count = 0
if messages is not None:
token_count = litellm.token_counter(model=model, messages=messages)
elif input is not None:
if isinstance(input, List):
input_text = "".join(text for text in input)
else:
input_text = input
token_count = litellm.token_counter(model=model, text=input_text)
# -----------------------
# Find lowest used model
# ----------------------
lowest_tpm = float("inf")
deployment = None
# return deployment with lowest tpm usage
for item in potential_deployments:
item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])
if item_tpm == 0:
return item
elif ("tpm" in item and item_tpm + token_count > item["tpm"]
or "rpm" in item and item_rpm + 1 >= item["rpm"]): # if user passed in tpm / rpm in the model_list
continue
elif item_tpm < lowest_tpm:
lowest_tpm = item_tpm
deployment = item
# if none, raise exception
if deployment is None:
raise ValueError("No models available.")
# return model
return deployment
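# --- Worked example (illustrative, numbers made up): two deployments for the same model
# group, with the incoming request estimated at 500 tokens:
#
#   deployment A: current tpm = 0       -> returned immediately (first zero-usage match)
#   deployment B: current tpm = 12000
#
# If no deployment is at zero usage, the one with the lowest current TPM that would not
# exceed its configured "tpm"/"rpm" limit after this call is chosen; if every deployment
# is over its limit, ValueError("No models available.") is raised.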
def _get_deployment_usage(
self,
deployment_name: str
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{deployment_name}:tpm:{current_minute}'
rpm_key = f'{deployment_name}:rpm:{current_minute}'
# ------------
# Return usage
# ------------
tpm = self.cache.get_cache(key=tpm_key) or 0
rpm = self.cache.get_cache(key=rpm_key) or 0
return int(tpm), int(rpm)
def increment(self, key: str, increment_value: int):
# get value
cached_value = self.cache.get_cache(key=key)
# update value
try:
cached_value = cached_value + increment_value
except:
cached_value = increment_value
# save updated value
self.cache.set_cache(value=cached_value, key=key, ttl=self.default_cache_time_seconds)
def _set_deployment_usage(
self,
model_name: str,
total_tokens: int
):
# ------------
# Setup values
# ------------
current_minute = datetime.now().strftime("%H-%M")
tpm_key = f'{model_name}:tpm:{current_minute}'
rpm_key = f'{model_name}:rpm:{current_minute}'
# ------------
# Update usage
# ------------
self.increment(tpm_key, total_tokens)
self.increment(rpm_key, 1)
def _start_health_check_thread(self):
"""
Starts a separate thread to perform health checks periodically.
"""
health_check_thread = threading.Thread(target=self._perform_health_checks, daemon=True)
health_check_thread.start()
def _perform_health_checks(self):
"""
Periodically performs health checks on the servers.
Updates the list of healthy servers accordingly.
"""
while True:
self.healthy_deployments = self._health_check()
# Adjust the time interval based on your needs
time.sleep(15)
def _health_check(self):
"""
Performs a health check on the deployments
Returns the list of healthy deployments
"""
healthy_deployments = []
for deployment in self.model_list:
litellm_args = deployment["litellm_params"]
try:
start_time = time.time()
litellm.completion(messages=[{"role": "user", "content": ""}], max_tokens=1, **litellm_args) # hit the server with a blank message to see how long it takes to respond
end_time = time.time()
response_time = end_time - start_time
logging.debug(f"response_time: {response_time}")
healthy_deployments.append((deployment, response_time))
healthy_deployments.sort(key=lambda x: x[1])
except Exception as e:
pass
return healthy_deployments
def weighted_shuffle_by_latency(self, items):
# Sort the items by latency
sorted_items = sorted(items, key=lambda x: x[1])
# Get only the latencies
latencies = [i[1] for i in sorted_items]
# Calculate the sum of all latencies
total_latency = sum(latencies)
# Calculate the weight for each latency (lower latency = higher weight)
weights = [total_latency-latency for latency in latencies]
# Get a weighted random item
if sum(weights) == 0:
chosen_item = random.choice(sorted_items)[0]
else:
chosen_item = random.choices(sorted_items, weights=weights, k=1)[0][0]
return chosen_item
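# --- Worked example (illustrative): for items_with_latencies like
#   [("A", 10), ("B", 20), ("C", 30), ("D", 40)]
# total_latency = 100 and the weights become [90, 80, 70, 60], so the fastest
# deployment "A" is the most likely pick while slower deployments still receive
# some traffic.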
def set_model_list(self, model_list: list):
self.model_list = copy.deepcopy(model_list)
# we add api_base/api_key each model so load balancing between azure/gpt on api_base1 and api_base2 works
import os
for model in self.model_list:
litellm_params = model.get("litellm_params", {})
model_name = litellm_params.get("model")
#### MODEL ID INIT ########
model_info = model.get("model_info", {})
model_info["id"] = model_info.get("id", str(uuid.uuid4()))
model["model_info"] = model_info
#### for OpenAI / Azure we need to initialize the Client for High Traffic ########
custom_llm_provider = litellm_params.get("custom_llm_provider")
if custom_llm_provider is None:
custom_llm_provider = model_name.split("/",1)[0]
if (
model_name in litellm.open_ai_chat_completion_models
or custom_llm_provider == "custom_openai"
or custom_llm_provider == "deepinfra"
or custom_llm_provider == "perplexity"
or custom_llm_provider == "anyscale"
or custom_llm_provider == "openai"
or custom_llm_provider == "azure"
or "ft:gpt-3.5-turbo" in model_name
or model_name in litellm.open_ai_embedding_models
):
# glorified / complicated reading of configs
# user can pass vars directly or they can pass os.environ/AZURE_API_KEY, in which case we will read the env
# we do this here because we init clients for Azure, OpenAI and we need to set the right key
api_key = litellm_params.get("api_key")
if api_key and api_key.startswith("os.environ/"):
api_key_env_name = api_key.replace("os.environ/", "")
api_key = litellm.get_secret(api_key_env_name)
litellm_params["api_key"] = api_key
api_base = litellm_params.get("api_base")
base_url = litellm_params.get("base_url")
api_base = api_base or base_url # allow users to pass in `api_base` or `base_url` for azure
if api_base and api_base.startswith("os.environ/"):
api_base_env_name = api_base.replace("os.environ/", "")
api_base = litellm.get_secret(api_base_env_name)
litellm_params["api_base"] = api_base
api_version = litellm_params.get("api_version")
if api_version and api_version.startswith("os.environ/"):
api_version_env_name = api_version.replace("os.environ/", "")
api_version = litellm.get_secret(api_version_env_name)
litellm_params["api_version"] = api_version
timeout = litellm_params.pop("timeout", None)
if isinstance(timeout, str) and timeout.startswith("os.environ/"):
timeout_env_name = timeout.replace("os.environ/", "")
timeout = litellm.get_secret(timeout_env_name)
litellm_params["timeout"] = timeout
stream_timeout = litellm_params.pop("stream_timeout", timeout) # if no stream_timeout is set, default to timeout
if isinstance(stream_timeout, str) and stream_timeout.startswith("os.environ/"):
stream_timeout_env_name = stream_timeout.replace("os.environ/", "")
stream_timeout = litellm.get_secret(stream_timeout_env_name)
litellm_params["stream_timeout"] = stream_timeout
max_retries = litellm_params.pop("max_retries", 2)
if isinstance(max_retries, str) and max_retries.startswith("os.environ/"):
max_retries_env_name = max_retries.replace("os.environ/", "")
max_retries = litellm.get_secret(max_retries_env_name)
litellm_params["max_retries"] = max_retries
if "azure" in model_name:
if api_base is None:
raise ValueError("api_base is required for Azure OpenAI. Set it on your config")
if api_version is None:
api_version = "2023-07-01-preview"
if "gateway.ai.cloudflare.com" in api_base:
if not api_base.endswith("/"):
api_base += "/"
azure_model = model_name.replace("azure/", "")
api_base += f"{azure_model}"
model["async_client"] = openai.AsyncAzureOpenAI(
api_key=api_key,
base_url=api_base,
api_version=api_version,
timeout=timeout,
max_retries=max_retries
)
model["client"] = openai.AzureOpenAI(
api_key=api_key,
base_url=api_base,
api_version=api_version,
timeout=timeout,
max_retries=max_retries
)
# streaming clients can have diff timeouts
model["stream_async_client"] = openai.AsyncAzureOpenAI(
api_key=api_key,
base_url=api_base,
api_version=api_version,
timeout=stream_timeout,
max_retries=max_retries
)
model["stream_client"] = openai.AzureOpenAI(
api_key=api_key,
base_url=api_base,
api_version=api_version,
timeout=stream_timeout,
max_retries=max_retries
)
else:
self.print_verbose(f"Initializing Azure OpenAI Client for {model_name}, Api Base: {str(api_base)}, Api Key:{api_key}")
model["async_client"] = openai.AsyncAzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version,
timeout=timeout,
max_retries=max_retries
)
model["client"] = openai.AzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version,
timeout=timeout,
max_retries=max_retries
)
# streaming clients should have diff timeouts
model["stream_async_client"] = openai.AsyncAzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version,
timeout=stream_timeout,
max_retries=max_retries
)
model["stream_client"] = openai.AzureOpenAI(
api_key=api_key,
azure_endpoint=api_base,
api_version=api_version,
timeout=stream_timeout,
max_retries=max_retries
)
else:
self.print_verbose(f"Initializing OpenAI Client for {model_name}, Api Base:{str(api_base)}, Api Key:{api_key}")
model["async_client"] = openai.AsyncOpenAI(
api_key=api_key,
base_url=api_base,
timeout=timeout,
max_retries=max_retries
)
model["client"] = openai.OpenAI(
api_key=api_key,
base_url=api_base,
timeout=timeout,
max_retries=max_retries
)
# streaming clients should have diff timeouts
model["stream_async_client"] = openai.AsyncOpenAI(
api_key=api_key,
base_url=api_base,
timeout=stream_timeout,
max_retries=max_retries
)
# streaming clients should have diff timeouts
model["stream_client"] = openai.OpenAI(
api_key=api_key,
base_url=api_base,
timeout=stream_timeout,
max_retries=max_retries
)
############ End of initializing Clients for OpenAI/Azure ###################
self.deployment_names.append(model["litellm_params"]["model"])
model_id = ""
for key in model["litellm_params"]:
if key != "api_key" and key != "metadata":
model_id+= str(model["litellm_params"][key])
model["litellm_params"]["model"] += "-ModelID-" + model_id
self.print_verbose(f"\n Initialized Model List {self.model_list}")
############ Users can either pass tpm/rpm as a litellm_param or a router param ###########
# for get_available_deployment, we use the litellm_param["rpm"]
# in this snippet we also set rpm to be a litellm_param
if model["litellm_params"].get("rpm") is None and model.get("rpm") is not None:
model["litellm_params"]["rpm"] = model.get("rpm")
if model["litellm_params"].get("tpm") is None and model.get("tpm") is not None:
model["litellm_params"]["tpm"] = model.get("tpm")
self.model_names = [m["model_name"] for m in model_list]
def get_model_names(self):
return self.model_names
def _get_client(self, deployment, kwargs, client_type=None):
"""
Returns the appropriate client based on the given deployment, kwargs, and client_type.
Parameters:
deployment (dict): The deployment dictionary containing the clients.
kwargs (dict): The keyword arguments passed to the function.
client_type (str): The type of client to return.
Returns:
The appropriate client based on the given client_type and kwargs.
"""
if client_type == "async":
if kwargs.get("stream") == True:
return deployment.get("stream_async_client", None)
else:
return deployment.get("async_client", None)
else:
if kwargs.get("stream") == True:
return deployment.get("stream_client", None)
else:
return deployment.get("client", None)
def print_verbose(self, print_statement):
try:
if self.set_verbose or litellm.set_verbose:
print(f"LiteLLM.Router: {print_statement}") # noqa
except:
pass
def get_available_deployment(self,
model: str,
messages: Optional[List[Dict[str, str]]] = None,
input: Optional[Union[str, List]] = None,
specific_deployment: Optional[bool] = False
):
"""
Returns the deployment based on routing strategy
"""
# users need to explicitly call a specific deployment, by setting `specific_deployment = True` as completion()/embedding() kwarg
# When this was not explicit, we had several issues with fallbacks timing out
if specific_deployment == True:
# users can also specify a specific deployment name. At this point we should check if they are just trying to call a specific deployment
for deployment in self.model_list:
cleaned_model = litellm.utils.remove_model_id(deployment.get("litellm_params").get("model"))
if cleaned_model == model:
# User Passed a specific deployment name on their config.yaml, example azure/chat-gpt-v-2
# return the first deployment where the `model` matches the specified deployment name
return deployment
raise ValueError(f"LiteLLM Router: Trying to call specific deployment, but Model:{model} does not exist in Model List: {self.model_list}")
# check if aliases set on litellm model alias map
if model in litellm.model_group_alias_map:
self.print_verbose(f"Using a model alias. Got Request for {model}, sending requests to {litellm.model_group_alias_map.get(model)}")
model = litellm.model_group_alias_map[model]
## get healthy deployments
### get all deployments
### filter out the deployments currently cooling down
healthy_deployments = [m for m in self.model_list if m["model_name"] == model]
if len(healthy_deployments) == 0:
# check if the user sent in a deployment name instead
healthy_deployments = [m for m in self.model_list if m["litellm_params"]["model"] == model]
self.print_verbose(f"initial list of deployments: {healthy_deployments}")
deployments_to_remove = []
cooldown_deployments = self._get_cooldown_deployments()
self.print_verbose(f"cooldown deployments: {cooldown_deployments}")
### FIND UNHEALTHY DEPLOYMENTS
for deployment in healthy_deployments:
deployment_name = deployment["litellm_params"]["model"]
if deployment_name in cooldown_deployments:
deployments_to_remove.append(deployment)
### FILTER OUT UNHEALTHY DEPLOYMENTS
for deployment in deployments_to_remove:
healthy_deployments.remove(deployment)
self.print_verbose(f"healthy deployments: length {len(healthy_deployments)} {healthy_deployments}")
if len(healthy_deployments) == 0:
raise ValueError("No models available")
if litellm.model_alias_map and model in litellm.model_alias_map:
model = litellm.model_alias_map[
model
] # update the model to the actual value if an alias has been passed in
if self.routing_strategy == "least-busy" and self.leastbusy_logger is not None:
deployments = self.leastbusy_logger.get_available_deployments(model_group=model)
# pick least busy deployment
min_traffic = float('inf')
min_deployment = None
for k, v in deployments.items():
if v < min_traffic:
min_traffic = v # track the lowest traffic seen so far, so the least busy deployment is actually selected
min_deployment = k
############## No Available Deployments passed, we do a random pick #################
if min_deployment is None:
min_deployment = random.choice(healthy_deployments)
############## Available Deployments passed, we find the relevant item #################
else:
for m in healthy_deployments:
if m["model_info"]["id"] == min_deployment:
return m
min_deployment = random.choice(healthy_deployments)
return min_deployment
elif self.routing_strategy == "simple-shuffle":
# if users pass rpm or tpm, we do a random weighted pick - based on rpm/tpm
############## Check if we can do a RPM/TPM based weighted pick #################
rpm = healthy_deployments[0].get("litellm_params").get("rpm", None)
if rpm is not None:
# use weight-random pick if rpms provided
rpms = [m["litellm_params"].get("rpm", 0) for m in healthy_deployments]
self.print_verbose(f"\nrpms {rpms}")
total_rpm = sum(rpms)
weights = [rpm / total_rpm for rpm in rpms]
self.print_verbose(f"\n weights {weights}")
# Perform weighted random pick
selected_index = random.choices(range(len(rpms)), weights=weights)[0]
self.print_verbose(f"\n selected index, {selected_index}")
deployment = healthy_deployments[selected_index]
return deployment or deployment[0]
############## Check if we can do a RPM/TPM based weighted pick #################
tpm = healthy_deployments[0].get("litellm_params").get("tpm", None)
if tpm is not None:
# use weight-random pick if tpms provided
tpms = [m["litellm_params"].get("tpm", 0) for m in healthy_deployments]
self.print_verbose(f"\ntpms {tpms}")
total_tpm = sum(tpms)
weights = [tpm / total_tpm for tpm in tpms]
self.print_verbose(f"\n weights {weights}")
# Perform weighted random pick
selected_index = random.choices(range(len(tpms)), weights=weights)[0]
self.print_verbose(f"\n selected index, {selected_index}")
deployment = healthy_deployments[selected_index]
return deployment or deployment[0]
############## No RPM/TPM passed, we do a random pick #################
item = random.choice(healthy_deployments)
return item or item[0]
elif self.routing_strategy == "latency-based-routing":
returned_item = None
lowest_latency = float('inf')
### shuffles with priority for lowest latency
# items_with_latencies = [('A', 10), ('B', 20), ('C', 30), ('D', 40)]
items_with_latencies = []
for item in healthy_deployments:
items_with_latencies.append((item, self.deployment_latency_map[item["litellm_params"]["model"]]))
returned_item = self.weighted_shuffle_by_latency(items_with_latencies)
return returned_item
elif self.routing_strategy == "usage-based-routing":
return self.get_usage_based_available_deployment(model=model, messages=messages, input=input)
raise ValueError("No models available.")
def flush_cache(self):
litellm.cache = None
self.cache.flush_cache()
def reset(self):
## clean up on close
litellm.success_callback = []
litellm.__async_success_callback = []
litellm.failure_callback = []
litellm._async_failure_callback = []
self.flush_cache()
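# --- Usage sketch (assumption, not part of the original file): a minimal way this Router
# is typically driven. The constructor arguments and completion() method are not shown in
# this excerpt, so treat the exact signatures as illustrative.
#
#   router = Router(
#       model_list=model_list,                       # entries shaped as in set_model_list
#       routing_strategy="simple-shuffle",
#       fallbacks=[{"gpt-3.5-turbo": ["gpt-4"]}],
#       num_retries=2,
#   )
#   response = router.completion(
#       model="gpt-3.5-turbo",
#       messages=[{"role": "user", "content": "hello"}],
#   )
#   router.reset()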
| [] |
2024-01-10 | Boostchat-ai/litellm | litellm~proxy~proxy_cli.py | import click
import subprocess, traceback, json
import os, sys
import random, appdirs
from datetime import datetime
from dotenv import load_dotenv
import operator
sys.path.append(os.getcwd())
config_filename = "litellm.secrets"
# Using appdirs to determine user-specific config path
config_dir = appdirs.user_config_dir("litellm")
user_config_path = os.getenv("LITELLM_CONFIG_PATH", os.path.join(config_dir, config_filename))
load_dotenv()
from importlib import resources
import shutil
telemetry = None
def run_ollama_serve():
try:
command = ['ollama', 'serve']
with open(os.devnull, 'w') as devnull:
process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
except Exception as e:
print(f"""
LiteLLM Warning: proxy started with `ollama` model\n`ollama serve` failed with Exception: {e}. \nEnsure you run `ollama serve`
""") # noqa
def clone_subfolder(repo_url, subfolder, destination):
# Clone the full repo
repo_name = repo_url.split('/')[-1]
repo_master = os.path.join(destination, "repo_master")
subprocess.run(['git', 'clone', repo_url, repo_master])
# Move into the subfolder
subfolder_path = os.path.join(repo_master, subfolder)
# Copy subfolder to destination
for file_name in os.listdir(subfolder_path):
source = os.path.join(subfolder_path, file_name)
if os.path.isfile(source):
shutil.copy(source, destination)
else:
dest_path = os.path.join(destination, file_name)
shutil.copytree(source, dest_path)
# Remove cloned repo folder
subprocess.run(['rm', '-rf', os.path.join(destination, "repo_master")])
feature_telemetry(feature="create-proxy")
def is_port_in_use(port):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
@click.command()
@click.option('--host', default='0.0.0.0', help='Host for the server to listen on.')
@click.option('--port', default=8000, help='Port to bind the server to.')
@click.option('--num_workers', default=1, help='Number of uvicorn workers to spin up')
@click.option('--api_base', default=None, help='API base URL.')
@click.option('--api_version', default="2023-07-01-preview", help='For azure - pass in the api version.')
@click.option('--model', '-m', default=None, help='The model name to pass to litellm')
@click.option('--alias', default=None, help='The alias for the model - use this to give a litellm model name (e.g. "huggingface/codellama/CodeLlama-7b-Instruct-hf") a more user-friendly name ("codellama")')
@click.option('--add_key', default=None, help='The API key to add to the proxy')
@click.option('--headers', default=None, help='headers for the API call')
@click.option('--save', is_flag=True, type=bool, help='Save the model-specific config')
@click.option('--debug', default=False, is_flag=True, type=bool, help='To debug the input')
@click.option('--use_queue', default=False, is_flag=True, type=bool, help='To use celery workers for async endpoints')
@click.option('--temperature', default=None, type=float, help='Set temperature for the model')
@click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
@click.option('--request_timeout', default=600, type=int, help='Set timeout in seconds for completion calls')
@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
@click.option('--config', '-c', default=None, help='Path to the proxy configuration file (e.g. config.yaml). Usage `litellm --config config.yaml`')
@click.option('--max_budget', default=None, type=float, help='Set max budget for API calls - works for hosted models like OpenAI, TogetherAI, Anthropic, etc.`')
@click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`')
@click.option('--logs', flag_value=False, type=int, help='Gets the "n" most recent logs. By default gets most recent log.')
@click.option('--health', flag_value=True, help='Make a chat/completions request to all llms in config.yaml')
@click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
@click.option('--test_async', default=False, is_flag=True, help='Calls async endpoints /queue/requests and /queue/response')
@click.option('--num_requests', default=10, type=int, help='Number of requests to hit async endpoint with')
@click.option('--local', is_flag=True, default=False, help='for local debugging')
def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, add_function_to_prompt, config, max_budget, telemetry, logs, test, local, num_workers, test_async, num_requests, use_queue, health):
global feature_telemetry
args = locals()
if local:
from proxy_server import app, save_worker_config, usage_telemetry
else:
try:
from .proxy_server import app, save_worker_config, usage_telemetry
except ImportError as e:
from proxy_server import app, save_worker_config, usage_telemetry
feature_telemetry = usage_telemetry
if logs is not None:
if logs == 0: # default to 1
logs = 1
try:
with open('api_log.json') as f:
data = json.load(f)
# convert keys to datetime objects
log_times = {datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items()}
# sort by timestamp
sorted_times = sorted(log_times.items(), key=operator.itemgetter(0), reverse=True)
# get n recent logs
recent_logs = {k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs]}
print(json.dumps(recent_logs, indent=4)) # noqa
except:
raise Exception("LiteLLM: No logs saved!")
return
if model and "ollama" in model and api_base is None:
run_ollama_serve()
if test_async is True:
import requests, concurrent, time
api_base = f"http://{host}:{port}"
def _make_openai_completion():
data = {
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": "Write a short poem about the moon"}]
}
response = requests.post("http://0.0.0.0:8000/queue/request", json=data)
response = response.json()
while True:
try:
url = response["url"]
polling_url = f"{api_base}{url}"
polling_response = requests.get(polling_url)
polling_response = polling_response.json()
print("\n RESPONSE FROM POLLING JOB", polling_response)
status = polling_response["status"]
if status == "finished":
llm_response = polling_response["result"]
break
print(f"POLLING JOB{polling_url}\nSTATUS: {status}, \n Response {polling_response}") # noqa
time.sleep(0.5)
except Exception as e:
print("got exception in polling", e)
break
# Number of concurrent calls (you can adjust this)
concurrent_calls = num_requests
# List to store the futures of concurrent calls
futures = []
start_time = time.time()
# Make concurrent calls
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_calls) as executor:
for _ in range(concurrent_calls):
futures.append(executor.submit(_make_openai_completion))
# Wait for all futures to complete
concurrent.futures.wait(futures)
# Summarize the results
successful_calls = 0
failed_calls = 0
for future in futures:
if future.done():
# _make_openai_completion returns None, so check for an exception instead of the return value
if future.exception() is None:
successful_calls += 1
else:
failed_calls += 1
end_time = time.time()
print(f"Elapsed Time: {end_time-start_time}")
print(f"Load test Summary:")
print(f"Total Requests: {concurrent_calls}")
print(f"Successful Calls: {successful_calls}")
print(f"Failed Calls: {failed_calls}")
return
if health != False:
import requests
print("\nLiteLLM: Health Testing models in config")
response = requests.get(url=f"http://{host}:{port}/health")
print(json.dumps(response.json(), indent=4))
return
if test != False:
click.echo('\nLiteLLM: Making a test ChatCompletions request to your proxy')
import openai
if test == True: # flag value set
api_base = f"http://{host}:{port}"
else:
api_base = test
client = openai.OpenAI(
api_key="My API Key",
base_url=api_base
)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
], max_tokens=256)
click.echo(f'\nLiteLLM: response from proxy {response}')
print("\n Making streaming request to proxy")
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
],
stream=True,
)
for chunk in response:
click.echo(f'LiteLLM: streaming response from proxy {chunk}')
print("\n making completion request to proxy")
response = client.completions.create(model="gpt-3.5-turbo", prompt='this is a test request, write a short poem')
print(response)
return
else:
if headers:
headers = json.loads(headers)
save_worker_config(model=model, alias=alias, api_base=api_base, api_version=api_version, debug=debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, max_budget=max_budget, telemetry=telemetry, drop_params=drop_params, add_function_to_prompt=add_function_to_prompt, headers=headers, save=save, config=config, use_queue=use_queue)
try:
import uvicorn
except:
raise ImportError("Uvicorn needs to be imported. Run - `pip install uvicorn`")
if port == 8000 and is_port_in_use(port):
port = random.randint(1024, 49152)
uvicorn.run("litellm.proxy.proxy_server:app", host=host, port=port, workers=num_workers)
if __name__ == "__main__":
run_server()
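# --- Usage sketch (illustrative): typical invocations of this CLI, built only from the
# click options declared above (the `litellm` entrypoint name is assumed).
#
#   litellm --model gpt-3.5-turbo --port 8000     # start the proxy on port 8000
#   litellm --config config.yaml --num_workers 4  # load a proxy config file
#   litellm --test                                # send a test chat completion request
#   litellm --health                              # call /health for all configured models
#   litellm --logs 5                              # print the 5 most recent request logs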
| [
"this is a test request, write a short poem",
"Write a short poem about the moon"
] |
2024-01-10 | Boostchat-ai/litellm | litellm~utils.py | # +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
import sys, re
import dotenv, json, traceback, threading
import subprocess, os
import litellm, openai
import itertools
import random, uuid, requests
import datetime, time
import tiktoken
import uuid
import aiohttp
import logging
import asyncio, httpx, inspect
import copy
from tokenizers import Tokenizer
from dataclasses import (
dataclass,
field,
) # for storing API inputs, outputs, and metadata
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langsmith import LangsmithLogger
from .integrations.weights_biases import WeightsBiasesLogger
from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai import OpenAIError as OriginalError
from openai._models import BaseModel as OpenAIObject
from .exceptions import (
AuthenticationError,
BadRequestError,
RateLimitError,
ServiceUnavailableError,
OpenAIError,
ContextWindowExceededError,
Timeout,
APIConnectionError,
APIError,
BudgetExceededError
)
from typing import cast, List, Dict, Union, Optional, Literal
from .caching import Cache
from concurrent.futures import ThreadPoolExecutor
####### ENVIRONMENT VARIABLES ####################
# Adjust to your specific application needs / system capabilities.
MAX_THREADS = 100
# Create a ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=MAX_THREADS)
dotenv.load_dotenv() # Loading env variables using dotenv
sentry_sdk_instance = None
capture_exception = None
add_breadcrumb = None
posthog = None
slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
langsmithLogger = None
weightsBiasesLogger = None
customLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
supabaseClient = None
liteDebuggerClient = None
callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
last_fetched_at_keys = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
# {
# 'choices': [
# {
# 'finish_reason': 'stop',
# 'index': 0,
# 'message': {
# 'role': 'assistant',
# 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
# }
# }
# ],
# 'created': 1691429984.3852863,
# 'model': 'claude-instant-1',
# 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
# }
class UnsupportedParamsError(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
self.request = httpx.Request(method="POST", url="https://openai.api.com/v1/")
self.response = httpx.Response(status_code=status_code, request=self.request)
super().__init__(
self.message
) # Call the base class constructor with the parameters it needs
def _generate_id(): # private helper function
return 'chatcmpl-' + str(uuid.uuid4())
def map_finish_reason(finish_reason: str): # openai supports 5 stop sequences - 'stop', 'length', 'function_call', 'content_filter', 'null'
# anthropic mapping
if finish_reason == "stop_sequence":
return "stop"
# cohere mapping - https://docs.cohere.com/reference/generate
elif finish_reason == "COMPLETE":
return "stop"
elif finish_reason == "MAX_TOKENS":
return "length"
elif finish_reason == "ERROR_TOXIC":
return "content_filter"
elif finish_reason == "ERROR": # openai currently doesn't support an 'error' finish reason
return "stop"
# huggingface mapping https://huggingface.github.io/text-generation-inference/#/Text%20Generation%20Inference/generate_stream
elif finish_reason == "eos_token" or finish_reason == "stop_sequence":
return "stop"
return finish_reason
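# --- Illustrative examples of the mapping above:
#   map_finish_reason("stop_sequence") -> "stop"    (anthropic)
#   map_finish_reason("MAX_TOKENS")    -> "length"  (cohere)
#   map_finish_reason("eos_token")     -> "stop"    (huggingface)
#   map_finish_reason("length")        -> "length"  (already OpenAI-style, passed through)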
class FunctionCall(OpenAIObject):
arguments: str
name: str
class Function(OpenAIObject):
arguments: str
name: str
class ChatCompletionMessageToolCall(OpenAIObject):
id: str
function: Function
type: str
class Message(OpenAIObject):
def __init__(self, content="default", role="assistant", logprobs=None, function_call=None, tool_calls=None, **params):
super(Message, self).__init__(**params)
self.content = content
self.role = role
if function_call is not None:
self.function_call = FunctionCall(**function_call)
if tool_calls is not None:
self.tool_calls = []
for tool_call in tool_calls:
self.tool_calls.append(
ChatCompletionMessageToolCall(**tool_call)
)
if logprobs is not None:
self._logprobs = logprobs
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
def json(self, **kwargs):
try:
return self.model_dump() # noqa
except:
# if using pydantic v1
return self.dict()
class Delta(OpenAIObject):
def __init__(self, content=None, role=None, **params):
super(Delta, self).__init__(**params)
self.content = content
self.role = role
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Choices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, message=None, **params):
super(Choices, self).__init__(**params)
self.finish_reason = map_finish_reason(finish_reason) # set finish_reason for all responses
self.index = index
if message is None:
self.message = Message(content=None)
else:
self.message = message
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Usage(OpenAIObject):
def __init__(self, prompt_tokens=None, completion_tokens=None, total_tokens=None, **params):
super(Usage, self).__init__(**params)
if prompt_tokens:
self.prompt_tokens = prompt_tokens
if completion_tokens:
self.completion_tokens = completion_tokens
if total_tokens:
self.total_tokens = total_tokens
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class StreamingChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
super(StreamingChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = finish_reason
else:
self.finish_reason = None
self.index = index
if delta:
self.delta = delta
else:
self.delta = Delta()
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class ModelResponse(OpenAIObject):
id: str
"""A unique identifier for the completion."""
choices: List[Union[Choices, StreamingChoices]]
"""The list of completion choices the model generated for the input prompt."""
created: int
"""The Unix timestamp (in seconds) of when the completion was created."""
model: Optional[str] = None
"""The model used for completion."""
object: str
"""The object type, which is always "text_completion" """
system_fingerprint: Optional[str] = None
"""This fingerprint represents the backend configuration that the model runs with.
Can be used in conjunction with the `seed` request parameter to understand when
backend changes have been made that might impact determinism.
"""
usage: Optional[Usage] = None
"""Usage statistics for the completion request."""
_hidden_params: dict = {}
def __init__(self, id=None, choices=None, created=None, model=None, object=None, system_fingerprint=None, usage=None, stream=False, response_ms=None, hidden_params=None, **params):
if stream:
object = "chat.completion.chunk"
choices = [StreamingChoices()]
else:
if model in litellm.open_ai_embedding_models:
object = "embedding"
else:
object = "chat.completion"
choices = [Choices()]
if id is None:
id = _generate_id()
else:
id = id
if created is None:
created = int(time.time())
else:
created = created
model = model
if usage:
usage = usage
else:
usage = Usage()
if hidden_params:
self._hidden_params = hidden_params
super().__init__(id=id, choices=choices, created=created, model=model, object=object, system_fingerprint=system_fingerprint, usage=usage, **params)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class Embedding(OpenAIObject):
embedding: list = []
index: int
object: str
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class EmbeddingResponse(OpenAIObject):
model: Optional[str] = None
"""The model used for embedding."""
data: Optional[List] = None
"""The actual embedding value"""
object: str
"""The object type, which is always "embedding" """
usage: Optional[Usage] = None
"""Usage statistics for the embedding request."""
def __init__(self, model=None, usage=None, stream=False, response_ms=None, data=None):
object = "list"
if response_ms:
_response_ms = response_ms
else:
_response_ms = None
if data:
data = data
else:
data = None
if usage:
usage = usage
else:
usage = Usage()
model = model
super().__init__(model=model, object=object, data=data, usage=usage)
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextChoices(OpenAIObject):
def __init__(self, finish_reason=None, index=0, text=None, logprobs=None, **params):
super(TextChoices, self).__init__(**params)
if finish_reason:
self.finish_reason = map_finish_reason(finish_reason)
else:
self.finish_reason = "stop"
self.index = index
if text is not None:
self.text = text
else:
self.text = None
# keep the provided logprobs (the original branches discarded a truthy value by assigning [])
self.logprobs = logprobs
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
class TextCompletionResponse(OpenAIObject):
"""
{
"id": response["id"],
"object": "text_completion",
"created": response["created"],
"model": response["model"],
"choices": [
{
"text": response["choices"][0]["message"]["content"],
"index": response["choices"][0]["index"],
"logprobs": transformed_logprobs,
"finish_reason": response["choices"][0]["finish_reason"]
}
],
"usage": response["usage"]
}
"""
def __init__(self, id=None, choices=None, created=None, model=None, usage=None, stream=False, response_ms=None, **params):
super(TextCompletionResponse, self).__init__(**params)
if stream:
self.object = "text_completion.chunk"
self.choices = [TextChoices()]
else:
self.object = "text_completion"
self.choices = [TextChoices()]
if id is None:
self.id = _generate_id()
else:
self.id = id
if created is None:
self.created = int(time.time())
else:
self.created = created
if response_ms:
self._response_ms = response_ms
else:
self._response_ms = None
self.model = model
if usage:
self.usage = usage
else:
self.usage = Usage()
self._hidden_params = {} # used in case users want to access the original model response
def __contains__(self, key):
# Define custom behavior for the 'in' operator
return hasattr(self, key)
def get(self, key, default=None):
# Custom .get() method to access attributes with a default value if the attribute doesn't exist
return getattr(self, key, default)
def __getitem__(self, key):
# Allow dictionary-style access to attributes
return getattr(self, key)
def __setitem__(self, key, value):
# Allow dictionary-style assignment of attributes
setattr(self, key, value)
############################################################
def print_verbose(print_statement):
try:
if litellm.set_verbose:
print(print_statement) # noqa
except:
pass
####### LOGGING ###################
from enum import Enum
class CallTypes(Enum):
embedding = 'embedding'
completion = 'completion'
acompletion = 'acompletion'
aembedding = 'aembedding'
# Logging function -> log the exact model details + what's being sent | Non-Blocking
class Logging:
global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, llmonitorLogger
def __init__(self, model, messages, stream, call_type, start_time, litellm_call_id, function_id):
if call_type not in [item.value for item in CallTypes]:
allowed_values = ", ".join([item.value for item in CallTypes])
raise ValueError(f"Invalid call_type {call_type}. Allowed values: {allowed_values}")
self.model = model
self.messages = messages
self.stream = stream
self.start_time = start_time # log the call start time
self.call_type = call_type
self.litellm_call_id = litellm_call_id
self.function_id = function_id
self.streaming_chunks = [] # for generating complete stream response
def update_environment_variables(self, model, user, optional_params, litellm_params):
self.optional_params = optional_params
self.model = model
self.user = user
self.litellm_params = litellm_params
self.logger_fn = litellm_params["logger_fn"]
print_verbose(f"self.optional_params: {self.optional_params}")
self.model_call_details = {
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params,
"litellm_params": self.litellm_params,
"start_time": self.start_time,
"stream": self.stream,
"user": user,
**self.optional_params
}
def _pre_call(self, input, api_key, model=None, additional_args={}):
"""
Common helper function across the sync + async pre-call function
"""
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "pre_api_call"
if (
model
): # if model name was changes pre-call, overwrite the initial model call name with the new one
self.model_call_details["model"] = model
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
litellm.error_logs['PRE_CALL'] = locals()
try:
self._pre_call(input=input, api_key=api_key, model=model, additional_args=additional_args)
# User Logging -> if you pass in a custom logging function
headers = additional_args.get("headers", {})
if headers is None:
headers = {}
data = additional_args.get("complete_input_dict", {})
api_base = additional_args.get("api_base", "")
masked_headers = {k: (v[:-20] + '*' * 20) if (isinstance(v, str) and len(v) > 20) else v for k, v in headers.items()}
formatted_headers = " ".join([f"-H '{k}: {v}'" for k, v in masked_headers.items()])
print_verbose(f"PRE-API-CALL ADDITIONAL ARGS: {additional_args}")
curl_command = "\n\nPOST Request Sent from LiteLLM:\n"
curl_command += "curl -X POST \\\n"
curl_command += f"{api_base} \\\n"
curl_command += f"{formatted_headers} \\\n" if formatted_headers.strip() != "" else ""
curl_command += f"-d '{str(data)}'\n"
if additional_args.get("request_str", None) is not None:
# print the sagemaker / bedrock client request
curl_command = "\nRequest Sent from LiteLLM:\n"
curl_command += additional_args.get("request_str", None)
elif api_base == "":
curl_command = self.model_call_details
print_verbose(f"\033[92m{curl_command}\033[0m\n")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
if litellm.max_budget and self.stream:
start_time = self.start_time
end_time = self.start_time # no time has passed as the call hasn't been made yet
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="".join(message["content"] for message in self.messages), completion="", total_time=float_diff)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "supabase":
print_verbose("reaches supabase for logging!")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"supabaseClient: {supabaseClient}")
supabaseClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
)
elif callback == "lite_debugger":
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.input_log_event(
model=model,
messages=messages,
end_user=self.model_call_details.get("user", "default"),
litellm_call_id=self.litellm_params["litellm_call_id"],
litellm_params=self.model_call_details["litellm_params"],
optional_params=self.model_call_details["optional_params"],
print_verbose=print_verbose,
call_type=self.call_type
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details pre-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_pre_api_call(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
)
elif callable(callback): # custom logger functions
customLogger.log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
traceback.print_exc()
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except Exception as e:  # bind the exception so capture_exception(e) below receives a defined name
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
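# --- Illustrative sketch (values are placeholders): the verbose curl command assembled in
# pre_call prints roughly as:
#
#   POST Request Sent from LiteLLM:
#   curl -X POST \
#   https://api.openai.com/v1/chat/completions \
#   -H 'Authorization: Bearer ********************' \
#   -d '{"model": "gpt-3.5-turbo", "messages": [...]}'
#
# Header values longer than 20 characters have their last 20 characters replaced with "*";
# for bedrock/sagemaker the raw "request_str" is printed instead of a curl command.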
async def async_pre_call(self, result=None, start_time=None, end_time=None, **kwargs):
"""
 Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
"""
start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result)
print_verbose(f"Async input callbacks: {litellm._async_input_callback}")
for callback in litellm._async_input_callback:
try:
if isinstance(callback, CustomLogger): # custom logger class
print_verbose(f"Async input callbacks: CustomLogger")
asyncio.create_task(callback.async_log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
))
if callable(callback): # custom logger functions
print_verbose(f"Async success callbacks: async_log_event")
asyncio.create_task(customLogger.async_log_input_event(
model=self.model,
messages=self.messages,
kwargs=self.model_call_details,
print_verbose=print_verbose,
callback_func=callback
))
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
def post_call(self, original_response, input=None, api_key=None, additional_args={}):
# Log the exact result from the LLM API, for streaming - log the type of response received
litellm.error_logs['POST_CALL'] = locals()
try:
self.model_call_details["input"] = input
self.model_call_details["api_key"] = api_key
self.model_call_details["original_response"] = original_response
self.model_call_details["additional_args"] = additional_args
self.model_call_details["log_event_type"] = "post_api_call"
# User Logging -> if you pass in a custom logging function
print_verbose(f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n")
print_verbose(
f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
)
print_verbose(f"Logging Details Post-API Call: LiteLLM Params: {self.model_call_details}")
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
self.model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
# Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
for callback in litellm.input_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches litedebugger for post-call logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
liteDebuggerClient.post_call_log_event(
original_response=original_response,
litellm_call_id=self.litellm_params["litellm_call_id"],
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "sentry" and add_breadcrumb:
print_verbose("reaches sentry breadcrumbing")
add_breadcrumb(
category="litellm.llm_call",
message=f"Model Call Details post-call: {self.model_call_details}",
level="info",
)
elif isinstance(callback, CustomLogger): # custom logger class
callback.log_post_api_call(
kwargs=self.model_call_details,
response_obj=None,
start_time=self.start_time,
end_time=None
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
def _success_handler_helper_fn(self, result=None, start_time=None, end_time=None):
try:
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
if litellm.max_budget and self.stream:
time_diff = (end_time - start_time).total_seconds()
float_diff = float(time_diff)
litellm._current_cost += litellm.completion_cost(model=self.model, prompt="", completion=result["content"], total_time=float_diff)
return start_time, end_time, result
except:
pass
def success_handler(self, result=None, start_time=None, end_time=None, **kwargs):
print_verbose(
f"Logging Details LiteLLM-Success Call"
)
# print(f"original response in success handler: {self.model_call_details['original_response']}")
try:
print_verbose(f"success callbacks: {litellm.success_callback}")
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream == True and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == True:
# if it's acompletion == True, chunks are built/appended in async_success_handler
if result.choices[0].finish_reason is not None: # if it's the last chunk
streaming_chunks = self.streaming_chunks + [result]
complete_streaming_response = litellm.stream_chunk_builder(streaming_chunks, messages=self.model_call_details.get("messages", None))
else:
# this is a completion() call
if self.stream == True:
print_verbose("success callback - assembling complete streaming response")
if result.choices[0].finish_reason is not None: # if it's the last chunk
print_verbose(f"success callback - Got the very Last chunk. Assembling {self.streaming_chunks}")
self.streaming_chunks.append(result)
complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None))
print_verbose(f"success callback - complete streamign response{complete_streaming_response}")
else:
self.streaming_chunks.append(result)
if complete_streaming_response:
self.model_call_details["complete_streaming_response"] = complete_streaming_response
start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result)
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
print_verbose(f"liteDebuggerClient details function {self.call_type} and stream set to {self.stream}")
liteDebuggerClient.log_event(
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
if callback == "promptlayer":
print_verbose("reaches promptlayer for logging!")
promptLayerLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "supabase":
print_verbose("reaches supabase for logging!")
kwargs=self.model_call_details
# this only logs streaming once - complete_streaming_response only exists when the stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches supabase for streaming logging!")
result = kwargs["complete_streaming_response"]
model = kwargs["model"]
messages = kwargs["messages"]
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
supabaseClient.log_event(
model=model,
messages=messages,
end_user=optional_params.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=litellm_params.get("litellm_call_id", str(uuid.uuid4())),
print_verbose=print_verbose,
)
if callback == "wandb":
print_verbose("reaches wandb for logging!")
weightsBiasesLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langsmith":
print_verbose("reaches langsmtih for logging!")
langsmithLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "llmonitor":
print_verbose("reaches llmonitor for logging!")
model = self.model
input = self.model_call_details.get("messages", self.model_call_details.get("input", None))
# embedding calls are typed 'embed', everything else 'llm'
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="end",
model=model,
input=input,
user_id=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
run_id=self.litellm_call_id,
print_verbose=print_verbose,
)
if callback == "helicone":
print_verbose("reaches helicone for logging!")
model = self.model
messages = kwargs["messages"]
heliconeLogger.log_success(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "langfuse":
global langFuseLogger
print_verbose("reaches langfuse for logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if k != "original_response": # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once - complete_streaming_response only exists when the stream ends
if self.stream:
if "complete_streaming_response" not in kwargs:
return
else:
print_verbose("reaches langfuse for streaming logging!")
result = kwargs["complete_streaming_response"]
if langFuseLogger is None:
langFuseLogger = LangFuseLogger()
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
if callback == "cache" and litellm.cache is not None:
# this only logs streaming once - complete_streaming_response only exists when the stream ends
print_verbose("success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "complete_streaming_response" not in kwargs:
print_verbose(f"success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n")
return
else:
print_verbose("success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache")
result = kwargs["complete_streaming_response"]
# only add to cache once we have a complete streaming response
litellm.cache.add_cache(result, **kwargs)
if callback == "traceloop":
deep_copy = {}
for k, v in self.model_call_details.items():
if k != "original_response":
deep_copy[k] = v
traceloopLogger.log_event(
kwargs=deep_copy,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif isinstance(callback, CustomLogger) and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == False and self.model_call_details.get("litellm_params", {}).get("aembedding", False) == False: # custom logger class
print_verbose(f"success callbacks: Running Custom Logger Class")
if self.stream and complete_streaming_response is None:
callback.log_stream_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time
)
else:
if self.stream and complete_streaming_response:
self.model_call_details["complete_response"] = self.model_call_details.get("complete_streaming_response", {})
result = self.model_call_details["complete_response"]
callback.log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
print_verbose(f"success callbacks: Running Custom Callback Function")
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
pass
async def async_success_handler(self, result=None, start_time=None, end_time=None, **kwargs):
"""
Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
"""
print_verbose(f"Async success callbacks: {litellm._async_success_callback}")
## BUILD COMPLETE STREAMED RESPONSE
complete_streaming_response = None
if self.stream:
if result.choices[0].finish_reason is not None: # if it's the last chunk
self.streaming_chunks.append(result)
# print_verbose(f"final set of received chunks: {self.streaming_chunks}")
try:
complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None))
except:
complete_streaming_response = None
else:
self.streaming_chunks.append(result)
if complete_streaming_response:
self.model_call_details["complete_streaming_response"] = complete_streaming_response
start_time, end_time, result = self._success_handler_helper_fn(start_time=start_time, end_time=end_time, result=result)
for callback in litellm._async_success_callback:
try:
if callback == "cache" and litellm.cache is not None:
# set_cache once complete streaming response is built
print_verbose("async success_callback: reaches cache for logging!")
kwargs = self.model_call_details
if self.stream:
if "complete_streaming_response" not in kwargs:
print_verbose(f"async success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n")
return
else:
print_verbose("async success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache")
result = kwargs["complete_streaming_response"]
# only add to cache once we have a complete streaming response
litellm.cache.add_cache(result, **kwargs)
if isinstance(callback, CustomLogger): # custom logger class
print_verbose(f"Async success callbacks: CustomLogger")
if self.stream:
if "complete_streaming_response" in self.model_call_details:
await callback.async_log_success_event(
kwargs=self.model_call_details,
response_obj=self.model_call_details["complete_streaming_response"],
start_time=start_time,
end_time=end_time,
)
else:
await callback.async_log_stream_event( # [TODO]: move this to being an async log stream event function
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time
)
else:
await callback.async_log_success_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
print_verbose(f"Async success callbacks: async_log_event")
await customLogger.async_log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
def _failure_handler_helper_fn(self, exception, traceback_exception, start_time=None, end_time=None):
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
# on some exceptions, model_call_details is not always initialized; this ensures we still log those exceptions
if not hasattr(self, "model_call_details"):
self.model_call_details = {}
self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time
self.model_call_details.setdefault("original_response", None)
return start_time, end_time
def failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
print_verbose(
f"Logging Details LiteLLM-Failure Call"
)
try:
start_time, end_time = self._failure_handler_helper_fn(exception=exception, traceback_exception=traceback_exception, start_time=start_time, end_time=end_time)
result = None # result sent to all loggers, init this to None in case it's not created
for callback in litellm.failure_callback:
try:
if callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
result = {
"model": self.model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
self.model, messages=self.messages
),
"completion_tokens": 0,
},
}
liteDebuggerClient.log_event(
model=self.model,
messages=self.messages,
end_user=self.model_call_details.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=self.litellm_call_id,
print_verbose=print_verbose,
call_type = self.call_type,
stream = self.stream,
)
elif callback == "llmonitor":
print_verbose("reaches llmonitor for logging error!")
model = self.model
input = self.model_call_details["input"]
type = "embed" if self.call_type == CallTypes.embedding.value else "llm"
llmonitorLogger.log_event(
type=type,
event="error",
user_id=self.model_call_details.get("user", "default"),
model=model,
input=input,
error=traceback_exception,
run_id=self.litellm_call_id,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "sentry":
print_verbose("sending exception to sentry")
if capture_exception:
capture_exception(exception)
else:
print_verbose(f"capture exception not initialized: {capture_exception}")
elif callable(callback): # custom logger functions
customLogger.log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
elif isinstance(callback, CustomLogger) and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == False and self.model_call_details.get("litellm_params", {}).get("aembedding", False) == False: # custom logger class
callback.log_failure_event(
start_time=start_time,
end_time=end_time,
response_obj=result,
kwargs=self.model_call_details,
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}"
)
print_verbose(
f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
)
if capture_exception: # log this error to sentry for debugging
capture_exception(e)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging {traceback.format_exc()}"
)
pass
async def async_failure_handler(self, exception, traceback_exception, start_time=None, end_time=None):
"""
Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
"""
start_time, end_time = self._failure_handler_helper_fn(exception=exception, traceback_exception=traceback_exception, start_time=start_time, end_time=end_time)
result = None # result sent to all loggers, init this to None in case it's not created
for callback in litellm._async_failure_callback:
try:
if isinstance(callback, CustomLogger): # custom logger class
await callback.async_log_failure_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
)
if callable(callback): # custom logger functions
await customLogger.async_log_event(
kwargs=self.model_call_details,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
callback_func=callback
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
)
def exception_logging(
additional_args={},
logger_fn=None,
exception=None,
):
try:
model_call_details = {}
if exception:
model_call_details["exception"] = exception
model_call_details["additional_args"] = additional_args
# User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs
print_verbose(
f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}"
)
if logger_fn and callable(logger_fn):
try:
logger_fn(
model_call_details
) # Expectation: any logger function passed in by the user should accept a dict object
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
except Exception as e:
print_verbose(
f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
)
pass
####### RULES ###################
class Rules:
"""
Fail calls based on the input or llm api output
Example usage:
import litellm
def my_custom_rule(input): # receives the model response
if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
return False
return True
litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call
response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
"content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"])
"""
def __init__(self) -> None:
pass
def pre_call_rules(self, input: str, model: str):
for rule in litellm.pre_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore
return True
def post_call_rules(self, input: str, model: str):
for rule in litellm.post_call_rules:
if callable(rule):
decision = rule(input)
if decision is False:
raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore
return True
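# A minimal pre-call rule sketch to complement the post-call example in the docstring
# above: reject prompts longer than an assumed character budget before the LLM call is
# made. The function name and threshold are illustrative, not part of the library.
def _example_prompt_length_rule(input): # receives the concatenated message content
    if len(input) > 10000: # assumed budget
        return False # returning False makes pre_call_rules raise APIResponseValidationError
    return True
# enable with: litellm.pre_call_rules = [_example_prompt_length_rule]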
####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function):
global liteDebuggerClient, get_all_keys
rules_obj = Rules()
def function_setup(
start_time, *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try:
global callback_list, add_breadcrumb, user_logger_fn, Logging
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if len(litellm.callbacks) > 0:
for callback in litellm.callbacks:
if callback not in litellm.input_callback:
litellm.input_callback.append(callback)
if callback not in litellm.success_callback:
litellm.success_callback.append(callback)
if callback not in litellm.failure_callback:
litellm.failure_callback.append(callback)
if callback not in litellm._async_success_callback:
litellm._async_success_callback.append(callback)
if callback not in litellm._async_failure_callback:
litellm._async_failure_callback.append(callback)
print_verbose(f"Initialized litellm callbacks, Async Success Callbacks: {litellm._async_success_callback}")
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
or len(litellm.failure_callback) > 0
) and len(callback_list) == 0:
callback_list = list(
set(
litellm.input_callback
+ litellm.success_callback
+ litellm.failure_callback
)
)
set_callbacks(
callback_list=callback_list,
function_id=function_id
)
## ASYNC CALLBACKS
if len(litellm.input_callback) > 0:
removed_async_items = []
for index, callback in enumerate(litellm.input_callback):
if inspect.iscoroutinefunction(callback):
litellm._async_input_callback.append(callback)
removed_async_items.append(index)
# Pop the async items from input_callback in reverse order to avoid index issues
for index in reversed(removed_async_items):
litellm.input_callback.pop(index)
if len(litellm.success_callback) > 0:
removed_async_items = []
for index, callback in enumerate(litellm.success_callback):
if inspect.iscoroutinefunction(callback):
litellm._async_success_callback.append(callback)
removed_async_items.append(index)
# Pop the async items from success_callback in reverse order to avoid index issues
for index in reversed(removed_async_items):
litellm.success_callback.pop(index)
if len(litellm.failure_callback) > 0:
removed_async_items = []
for index, callback in enumerate(litellm.failure_callback):
if inspect.iscoroutinefunction(callback):
litellm._async_failure_callback.append(callback)
removed_async_items.append(index)
# Pop the async items from failure_callback in reverse order to avoid index issues
for index in reversed(removed_async_items):
litellm.failure_callback.pop(index)
if add_breadcrumb:
add_breadcrumb(
category="litellm.llm_call",
message=f"Positional Args: {args}, Keyword Args: {kwargs}",
level="info",
)
if "logger_fn" in kwargs:
user_logger_fn = kwargs["logger_fn"]
# CRASH REPORTING TELEMETRY
crash_reporting(*args, **kwargs)
# INIT LOGGER - for user-specified integrations
model = args[0] if len(args) > 0 else kwargs["model"]
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
if len(args) > 1:
messages = args[1]
elif kwargs.get("messages", None):
messages = kwargs["messages"]
### PRE-CALL RULES ###
if isinstance(messages, list) and len(messages) > 0 and isinstance(messages[0], dict) and "content" in messages[0]:
rules_obj.pre_call_rules(input="".join(m["content"] for m in messages if isinstance(m["content"], str)), model=model)
elif call_type == CallTypes.embedding.value or call_type == CallTypes.aembedding.value:
messages = args[1] if len(args) > 1 else kwargs["input"]
stream = True if "stream" in kwargs and kwargs["stream"] == True else False
logging_obj = Logging(model=model, messages=messages, stream=stream, litellm_call_id=kwargs["litellm_call_id"], function_id=function_id, call_type=call_type, start_time=start_time)
return logging_obj
except Exception as e:
import logging
logging.debug(f"[Non-Blocking] {traceback.format_exc()}; args - {args}; kwargs - {kwargs}")
raise e
def post_call_processing(original_response, model):
try:
call_type = original_function.__name__
if call_type == CallTypes.completion.value or call_type == CallTypes.acompletion.value:
model_response = original_response['choices'][0]['message']['content']
### POST-CALL RULES ###
rules_obj.post_call_rules(input=model_response, model=model)
except Exception as e:
raise e
def crash_reporting(*args, **kwargs):
if litellm.telemetry:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
exception = kwargs["exception"] if "exception" in kwargs else None
custom_llm_provider = (
kwargs["custom_llm_provider"]
if "custom_llm_provider" in kwargs
else None
)
safe_crash_reporting(
model=model,
exception=exception,
custom_llm_provider=custom_llm_provider,
) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
except:
# [Non-Blocking Error]
pass
def wrapper(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
# only set litellm_call_id if it's not already in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
# remove this after deprecating litellm.caching
if (litellm.caching or litellm.caching_with_models) and litellm.cache is None:
litellm.cache = Cache()
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
if (litellm.cache != None or litellm.caching or litellm.caching_with_models):
print_verbose(f"Checking Cache")
preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs)
kwargs["preset_cache_key"] = preset_cache_key # for streaming calls, we need to pass the preset_cache_key
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
if "detail" in cached_result:
# implies an error occurred
pass
else:
call_type = original_function.__name__
print_verbose(f"Cache Response Object routing: call_type - {call_type}; cached_result instace: {type(cached_result)}")
if call_type == CallTypes.completion.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse(), stream = kwargs.get("stream", False))
elif call_type == CallTypes.embedding.value and isinstance(cached_result, dict):
return convert_to_model_response_object(response_object=cached_result, response_type="embedding")
else:
return cached_result
# MODEL CALL
result = original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
# TODO: Add to cache for streaming
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks, messages=kwargs.get("messages", None))
else:
return result
elif "acompletion" in kwargs and kwargs["acompletion"] == True:
return result
elif "aembedding" in kwargs and kwargs["aembedding"] == True:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
print_verbose(f"Wrapper: Completed Call, calling success_handler")
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
# threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread
my_thread.start()
# RETURN RESULT
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
call_type = original_function.__name__
if call_type == CallTypes.completion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
if (isinstance(e, openai.APIError)
or isinstance(e, openai.Timeout)):
kwargs["num_retries"] = num_retries
return litellm.completion_with_retries(*args, **kwargs)
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
args[0] = context_window_fallback_dict[model]
else:
kwargs["model"] = context_window_fallback_dict[model]
return original_function(*args, **kwargs)
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
if logging_obj:
logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this!
my_thread = threading.Thread(
target=handle_failure,
args=(e, traceback_exception, start_time, end_time, args, kwargs),
) # don't interrupt execution of main thread
my_thread.start()
if hasattr(e, "message"):
if (
liteDebuggerClient and liteDebuggerClient.dashboard_url != None
): # make it easy to get to the debugger logs if you've initialized it
e.message += f"\n Check the log in your dashboard - {liteDebuggerClient.dashboard_url}"
raise e
async def wrapper_async(*args, **kwargs):
start_time = datetime.datetime.now()
result = None
logging_obj = kwargs.get("litellm_logging_obj", None)
# only set litellm_call_id if it's not already in kwargs
if "litellm_call_id" not in kwargs:
kwargs["litellm_call_id"] = str(uuid.uuid4())
try:
model = args[0] if len(args) > 0 else kwargs["model"]
except:
raise ValueError("model param not passed in.")
try:
if logging_obj is None:
logging_obj = function_setup(start_time, *args, **kwargs)
kwargs["litellm_logging_obj"] = logging_obj
# [OPTIONAL] CHECK BUDGET
if litellm.max_budget:
if litellm._current_cost > litellm.max_budget:
raise BudgetExceededError(current_cost=litellm._current_cost, max_budget=litellm.max_budget)
# [OPTIONAL] CHECK CACHE
print_verbose(f"litellm.cache: {litellm.cache}")
print_verbose(f"kwargs[caching]: {kwargs.get('caching', False)}; litellm.cache: {litellm.cache}")
# if caching is false, don't run this
if (kwargs.get("caching", None) is None and litellm.cache is not None) or kwargs.get("caching", False) == True: # allow users to control returning cached responses from the completion function
# checking cache
print_verbose(f"INSIDE CHECKING CACHE")
if litellm.cache is not None:
print_verbose(f"Checking Cache")
cached_result = litellm.cache.get_cache(*args, **kwargs)
if cached_result != None:
print_verbose(f"Cache Hit!")
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value and isinstance(cached_result, dict):
if kwargs.get("stream", False) == True:
return convert_to_streaming_response_async(
response_object=cached_result,
)
else:
return convert_to_model_response_object(response_object=cached_result, model_response_object=ModelResponse())
else:
return cached_result
# MODEL CALL
result = await original_function(*args, **kwargs)
end_time = datetime.datetime.now()
if "stream" in kwargs and kwargs["stream"] == True:
if "complete_response" in kwargs and kwargs["complete_response"] == True:
chunks = []
for idx, chunk in enumerate(result):
chunks.append(chunk)
return litellm.stream_chunk_builder(chunks, messages=kwargs.get("messages", None))
else:
return result
### POST-CALL RULES ###
post_call_processing(original_response=result, model=model)
# [OPTIONAL] ADD TO CACHE
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
litellm.cache.add_cache(result, *args, **kwargs)
# LOG SUCCESS - handle streaming success logging in the _next_ object
print_verbose(f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}")
asyncio.create_task(logging_obj.async_success_handler(result, start_time, end_time))
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
# RETURN RESULT
if isinstance(result, ModelResponse):
result._response_ms = (end_time - start_time).total_seconds() * 1000 # return response latency in ms like openai
return result
except Exception as e:
traceback_exception = traceback.format_exc()
crash_reporting(*args, **kwargs, exception=traceback_exception)
end_time = datetime.datetime.now()
if logging_obj:
try:
logging_obj.failure_handler(e, traceback_exception, start_time, end_time) # DO NOT MAKE THREADED - router retry fallback relies on this!
except Exception as e:
raise e
try:
await logging_obj.async_failure_handler(e, traceback_exception, start_time, end_time)
except Exception as e:
raise e
call_type = original_function.__name__
if call_type == CallTypes.acompletion.value:
num_retries = (
kwargs.get("num_retries", None)
or litellm.num_retries
or None
)
litellm.num_retries = None # set retries to None to prevent infinite loops
context_window_fallback_dict = kwargs.get("context_window_fallback_dict", {})
if num_retries:
try:
kwargs["num_retries"] = num_retries
kwargs["original_function"] = original_function
if (isinstance(e, openai.RateLimitError)): # rate limiting specific error
kwargs["retry_strategy"] = "exponential_backoff_retry"
elif (isinstance(e, openai.APIError)): # generic api error
kwargs["retry_strategy"] = "constant_retry"
return await litellm.acompletion_with_retries(*args, **kwargs)
except:
pass
elif isinstance(e, litellm.exceptions.ContextWindowExceededError) and context_window_fallback_dict and model in context_window_fallback_dict:
if len(args) > 0:
args[0] = context_window_fallback_dict[model]
else:
kwargs["model"] = context_window_fallback_dict[model]
return await original_function(*args, **kwargs)
raise e
is_coroutine = inspect.iscoroutinefunction(original_function)
# Return the appropriate wrapper based on the original function type
if is_coroutine:
return wrapper_async
else:
return wrapper
####### USAGE CALCULATOR ################
# Extract the number of billion parameters from the model name
# only used for together_computer LLMs
def get_model_params_and_category(model_name):
import re
params_match = re.search(r'(\d+b)', model_name) # match parameter counts like 3b, 70b, etc.
category = None
if params_match != None:
params_match = params_match.group(1)
params_match = params_match.replace("b", "")
params_billion = float(params_match)
# Determine the category based on the number of parameters
if params_billion <= 3.0:
category = "together-ai-up-to-3b"
elif params_billion <= 7.0:
category = "together-ai-3.1b-7b"
elif params_billion <= 20.0:
category = "together-ai-7.1b-20b"
elif params_billion <= 40.0:
category = "together-ai-20.1b-40b"
elif params_billion <= 70.0:
category = "together-ai-40.1b-70b"
return category
return None
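# Illustrative sketch (not used by the library): how a together_computer-style model
# name maps to a size bucket. The model string below is an assumption for demonstration.
def _example_together_ai_category():
    # "70b" is extracted from the name -> 70.0 billion params -> 40.1b-70b bucket
    return get_model_params_and_category("togethercomputer/llama-2-70b") # "together-ai-40.1b-70b"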
def get_replicate_completion_pricing(completion_response=None, total_time=0.0):
# see https://replicate.com/pricing
a100_40gb_price_per_second_public = 0.001150
# for the LLMs litellm currently supports, almost all requests go to the A100 80GB
a100_80gb_price_per_second_public = 0.001400 # assume all calls sent to A100 80GB for now
if total_time == 0.0:
start_time = completion_response['created']
end_time = completion_response["ended"]
total_time = end_time - start_time
return a100_80gb_price_per_second_public*total_time
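# Worked example of the replicate pricing helper: a request that ran for 2 seconds at
# the assumed A100 80GB public rate above costs 2 * 0.001400 = 0.0028 USD.
def _example_replicate_cost_for_two_seconds():
    return get_replicate_completion_pricing(completion_response={}, total_time=2.0) # ~0.0028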
def _select_tokenizer(model: str):
# cohere
import pkg_resources
if model in litellm.cohere_models:
tokenizer = Tokenizer.from_pretrained("Cohere/command-nightly")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# anthropic
elif model in litellm.anthropic_models:
# Read the JSON file
filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
with open(filename, 'r') as f:
json_data = json.load(f)
# Decode the JSON data from utf-8
json_data_decoded = json.dumps(json_data, ensure_ascii=False)
# Convert to str
json_str = str(json_data_decoded)
# load tokenizer
tokenizer = Tokenizer.from_str(json_str)
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# llama2
elif "llama-2" in model.lower():
tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
# default - tiktoken
else:
return {"type": "openai_tokenizer", "tokenizer": encoding}
def encode(model: str, text: str):
"""
Encodes the given text using the specified model.
Args:
model (str): The name of the model to use for tokenization.
text (str): The text to be encoded.
Returns:
enc: The encoded text.
"""
tokenizer_json = _select_tokenizer(model=model)
enc = tokenizer_json["tokenizer"].encode(text)
return enc
def decode(model: str, tokens: List[int]):
tokenizer_json = _select_tokenizer(model=model)
dec = tokenizer_json["tokenizer"].decode(tokens)
return dec
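# Round-trip sketch for the encode/decode helpers above, using the tiktoken path for an
# OpenAI-style model. Token ids vary by tokenizer; the decoded text matches the input.
def _example_encode_decode_roundtrip(text: str = "hello world"):
    tokens = encode(model="gpt-3.5-turbo", text=text)
    return decode(model="gpt-3.5-turbo", tokens=tokens) # -> "hello world"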
def openai_token_counter(messages: Optional[list]=None, model="gpt-3.5-turbo-0613", text: Optional[str]= None):
"""
Return the number of tokens used by a list of messages.
Borrowed from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.
"""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print_verbose("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif model in litellm.open_ai_chat_completion_models:
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
if text:
num_tokens = len(encoding.encode(text, disallowed_special=()))
elif messages:
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value, disallowed_special=()))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
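# Worked sketch of the OpenAI chat token count above: every message adds tokens_per_message
# (3) plus the encoded role/content values, and the reply primer adds 3 more tokens.
# The message content is an arbitrary example.
def _example_openai_message_tokens():
    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    return openai_token_counter(messages=messages, model="gpt-3.5-turbo")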
def token_counter(model="", text=None, messages: Optional[List] = None):
"""
Count the number of tokens in a given text using a specified model.
Args:
model (str): The name of the model to use for tokenization. Default is an empty string.
text (str): The raw text string to be passed to the model. Default is None.
messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None.
Returns:
int: The number of tokens in the text.
"""
# use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model
if text == None:
if messages is not None:
print_verbose(f"token_counter messages received: {messages}")
text = ""
for message in messages:
if message.get("content", None):
text += message["content"]
if 'tool_calls' in message:
for tool_call in message['tool_calls']:
if 'function' in tool_call:
function_arguments = tool_call['function']['arguments']
text += function_arguments
else:
raise ValueError("text and messages cannot both be None")
num_tokens = 0
if model is not None:
tokenizer_json = _select_tokenizer(model=model)
if tokenizer_json["type"] == "huggingface_tokenizer":
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc.ids)
elif tokenizer_json["type"] == "openai_tokenizer":
if model in litellm.open_ai_chat_completion_models:
num_tokens = openai_token_counter(text=text, model=model)
else:
enc = tokenizer_json["tokenizer"].encode(text)
num_tokens = len(enc)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
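# Usage sketch for token_counter: message contents (and any tool-call arguments) are
# concatenated, then tokenized with whichever tokenizer _select_tokenizer picks.
def _example_token_counter():
    msgs = [{"role": "user", "content": "What's the weather in SF?"}]
    return token_counter(model="gpt-3.5-turbo", messages=msgs)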
def cost_per_token(model="", prompt_tokens=0, completion_tokens=0):
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
Parameters:
model (str): The name of the model to use. Default is ""
prompt_tokens (int): The number of tokens in the prompt.
completion_tokens (int): The number of tokens in the completion.
Returns:
tuple: A tuple containing the cost in USD dollars for prompt tokens and completion tokens, respectively.
"""
# given
prompt_tokens_cost_usd_dollar = 0
completion_tokens_cost_usd_dollar = 0
model_cost_ref = litellm.model_cost
# see this https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models
azure_llms = {
"gpt-35-turbo": "azure/gpt-3.5-turbo",
"gpt-35-turbo-16k": "azure/gpt-3.5-turbo-16k",
"gpt-35-turbo-instruct": "azure/gpt-3.5-turbo-instruct"
}
if model in model_cost_ref:
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif "ft:gpt-3.5-turbo" in model:
# fuzzy match ft:gpt-3.5-turbo:abcd-id-cool-litellm
prompt_tokens_cost_usd_dollar = (
model_cost_ref["ft:gpt-3.5-turbo"]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref["ft:gpt-3.5-turbo"]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
elif model in azure_llms:
model = azure_llms[model]
prompt_tokens_cost_usd_dollar = (
model_cost_ref[model]["input_cost_per_token"] * prompt_tokens
)
completion_tokens_cost_usd_dollar = (
model_cost_ref[model]["output_cost_per_token"] * completion_tokens
)
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
else:
# fallback: average the input/output cost across every model litellm knows about (azure/gpt deployments, gpt-4, gpt-3.5-turbo, etc. can land here if the exact model isn't in the cost map)
input_cost_sum = 0
output_cost_sum = 0
model_cost_ref = litellm.model_cost
for model in model_cost_ref:
input_cost_sum += model_cost_ref[model]["input_cost_per_token"]
output_cost_sum += model_cost_ref[model]["output_cost_per_token"]
avg_input_cost = input_cost_sum / len(model_cost_ref.keys())
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
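# Worked arithmetic for cost_per_token, assuming gpt-3.5-turbo is priced in the bundled
# cost map at $0.0000015 per input token and $0.000002 per output token (rates drift as
# the map is updated): 100 prompt + 50 completion tokens cost
# 100 * 0.0000015 + 50 * 0.000002 = 0.00015 + 0.0001 = 0.00025 USD.
def _example_cost_per_token():
    prompt_usd, completion_usd = cost_per_token(model="gpt-3.5-turbo", prompt_tokens=100, completion_tokens=50)
    return prompt_usd + completion_usd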
def completion_cost(
completion_response=None,
model=None,
prompt="",
messages: List = [],
completion="",
total_time=0.0, # used for replicate
):
"""
Calculate the cost of a given completion call for GPT-3.5-turbo, llama2, or any litellm-supported LLM.
Parameters:
completion_response (litellm.ModelResponses): [Required] The response received from a LiteLLM completion request.
[OPTIONAL PARAMS]
model (str): Optional. The name of the language model used in the completion calls
prompt (str): Optional. The input prompt passed to the llm
completion (str): Optional. The output completion text from the llm
total_time (float): Optional. (Only used for Replicate LLMs) The total time used for the request in seconds
Returns:
float: The cost in USD dollars for the completion based on the provided parameters.
Note:
- If completion_response is provided, the function extracts token information and the model name from it.
- If completion_response is not provided, the function calculates token counts based on the model and input text.
- The cost is calculated based on the model, prompt tokens, and completion tokens.
- For certain models containing "togethercomputer" in the name, prices are based on the model size.
- For Replicate models, the cost is calculated based on the total time used for the request.
Exceptions:
- If an error occurs during execution, the function returns 0.0 without blocking the user's execution path.
"""
try:
# Handle Inputs to completion_cost
prompt_tokens = 0
completion_tokens = 0
if completion_response is not None:
# get input/output tokens from completion_response
prompt_tokens = completion_response['usage']['prompt_tokens']
completion_tokens = completion_response['usage']['completion_tokens']
model = model or completion_response['model'] # check if user passed an override for model, if it's none check completion_response['model']
else:
if len(messages) > 0:
prompt_tokens = token_counter(model=model, messages=messages)
elif len(prompt) > 0:
prompt_tokens = token_counter(model=model, text=prompt)
completion_tokens = token_counter(model=model, text=completion)
# Calculate cost based on prompt_tokens, completion_tokens
if "togethercomputer" in model:
# together ai prices based on size of llm
# get_model_params_and_category takes a model name and returns the category of LLM size it is in model_prices_and_context_window.json
model = get_model_params_and_category(model)
# replicate llms are priced based on the time the request ran
# see https://replicate.com/pricing
elif (
model in litellm.replicate_models or
"replicate" in model
):
return get_replicate_completion_pricing(completion_response, total_time)
prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_token(
model=model, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens
)
return prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
except:
return 0.0 # this should not block a user's execution path
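# Minimal usage sketch for completion_cost without a response object: token counts are
# derived from the raw prompt/completion text via token_counter, then priced per token.
# The prompt/completion strings are arbitrary examples.
def _example_completion_cost_from_text():
    return completion_cost(
        model="gpt-3.5-turbo",
        prompt="Hey, how's it going?",
        completion="I'm doing well, thanks!",
    )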
####### HELPER FUNCTIONS ################
def register_model(model_cost: Union[str, dict]):
"""
Register new models / override existing models (and their pricing) for specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage:
model_cost_dict = {
"gpt-4": {
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
},
}
"""
loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary
if key in litellm.model_cost:
for k,v in loaded_model_cost[key].items():
litellm.model_cost[key][k] = v
# add new model names to provider lists
if value.get('litellm_provider') == 'openai':
if key not in litellm.open_ai_chat_completion_models:
litellm.open_ai_chat_completion_models.append(key)
elif value.get('litellm_provider') == 'text-completion-openai':
if key not in litellm.open_ai_text_completion_models:
litellm.open_ai_text_completion_models.append(key)
elif value.get('litellm_provider') == 'cohere':
if key not in litellm.cohere_models:
litellm.cohere_models.append(key)
elif value.get('litellm_provider') == 'anthropic':
if key not in litellm.anthropic_models:
litellm.anthropic_models.append(key)
elif value.get('litellm_provider') == 'openrouter':
split_string = key.split('/', 1)
if key not in litellm.openrouter_models:
litellm.openrouter_models.append(split_string[1])
elif value.get('litellm_provider') == 'vertex_ai-text-models':
if key not in litellm.vertex_text_models:
litellm.vertex_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-text-models':
if key not in litellm.vertex_code_text_models:
litellm.vertex_code_text_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-chat-models':
if key not in litellm.vertex_chat_models:
litellm.vertex_chat_models.append(key)
elif value.get('litellm_provider') == 'vertex_ai-code-chat-models':
if key not in litellm.vertex_code_chat_models:
litellm.vertex_code_chat_models.append(key)
elif value.get('litellm_provider') == 'ai21':
if key not in litellm.ai21_models:
litellm.ai21_models.append(key)
elif value.get('litellm_provider') == 'nlp_cloud':
if key not in litellm.nlp_cloud_models:
litellm.nlp_cloud_models.append(key)
elif value.get('litellm_provider') == 'aleph_alpha':
if key not in litellm.aleph_alpha_models:
litellm.aleph_alpha_models.append(key)
elif value.get('litellm_provider') == 'bedrock':
if key not in litellm.bedrock_models:
litellm.bedrock_models.append(key)
return model_cost
def get_litellm_params(
api_key=None,
force_timeout=600,
azure=False,
logger_fn=None,
verbose=False,
hugging_face=False,
replicate=False,
together_ai=False,
custom_llm_provider=None,
api_base=None,
litellm_call_id=None,
model_alias_map=None,
completion_call_id=None,
metadata=None,
model_info=None,
proxy_server_request=None,
acompletion=None,
preset_cache_key = None
):
litellm_params = {
"acompletion": acompletion,
"api_key": api_key,
"force_timeout": force_timeout,
"logger_fn": logger_fn,
"verbose": verbose,
"custom_llm_provider": custom_llm_provider,
"api_base": api_base,
"litellm_call_id": litellm_call_id,
"model_alias_map": model_alias_map,
"completion_call_id": completion_call_id,
"metadata": metadata,
"model_info": model_info,
"proxy_server_request": proxy_server_request,
"preset_cache_key": preset_cache_key,
"stream_response": {} # litellm_call_id: ModelResponse Dict
}
return litellm_params
def get_optional_params_embeddings(
# 2 optional params
user=None,
encoding_format=None,
custom_llm_provider="",
**kwargs
):
# retrieve all parameters passed to the function
passed_params = locals()
custom_llm_provider = passed_params.pop("custom_llm_provider", None)
special_params = passed_params.pop("kwargs")
for k, v in special_params.items():
passed_params[k] = v
default_params = {
"user": None,
"encoding_format": None
}
non_default_params = {k: v for k, v in passed_params.items() if (k in default_params and v != default_params[k])}
## raise exception if non-default value passed for non-openai/azure embedding calls
if custom_llm_provider != "openai" and custom_llm_provider != "azure":
if len(non_default_params.keys()) > 0:
if litellm.drop_params is True:
for k in non_default_params.keys():
passed_params.pop(k, None)
return passed_params
raise UnsupportedParamsError(status_code=500, message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.")
final_params = {**non_default_params, **kwargs}
return final_params
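# Sketch of the embeddings-param guard above: for providers other than OpenAI/Azure, a
# non-default `user`/`encoding_format` either raises or, with litellm.drop_params set,
# is silently dropped. The provider and value below are illustrative.
def _example_embedding_param_drop():
    litellm.drop_params = True
    params = get_optional_params_embeddings(user="example-user", custom_llm_provider="huggingface")
    litellm.drop_params = False
    return params # `user` was stripped for the unsupported provider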
def get_optional_params( # use the openai defaults
# 12 optional params
functions=[],
function_call="",
temperature=None,
top_p=None,
n=None,
stream=False,
stop=None,
max_tokens=None,
presence_penalty=None,
frequency_penalty=0,
logit_bias=None,
user=None,
model=None,
custom_llm_provider="",
response_format=None,
seed=None,
tools=None,
tool_choice=None,
max_retries=None,
**kwargs
):
# retrieve all parameters passed to the function
passed_params = locals()
special_params = passed_params.pop("kwargs")
for k, v in special_params.items():
passed_params[k] = v
default_params = {
"functions":[],
"function_call":"",
"temperature":None,
"top_p":None,
"n":None,
"stream":None,
"stop":None,
"max_tokens":None,
"presence_penalty":None,
"frequency_penalty":None,
"logit_bias": None,
"user":None,
"model":None,
"custom_llm_provider":"",
"response_format": None,
"seed": None,
"tools": None,
"tool_choice": None,
"max_retries": None,
}
# filter out those parameters that were passed with non-default values
non_default_params = {k: v for k, v in passed_params.items() if (k != "model" and k != "custom_llm_provider" and k in default_params and v != default_params[k])}
optional_params = {}
## raise exception if function calling passed in for a provider that doesn't support it
if "functions" in non_default_params or "function_call" in non_default_params:
if custom_llm_provider != "openai" and custom_llm_provider != "text-completion-openai" and custom_llm_provider != "azure":
if litellm.add_function_to_prompt: # if user opts to add it to prompt instead
optional_params["functions_unsupported_model"] = non_default_params.pop("functions")
else:
raise UnsupportedParamsError(status_code=500, message=f"Function calling is not supported by {custom_llm_provider}. To add it to the prompt, set `litellm.add_function_to_prompt = True`.")
def _check_valid_arg(supported_params):
print_verbose(f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}")
print_verbose(f"\nLiteLLM: Params passed to completion() {passed_params}")
print_verbose(f"\nLiteLLM: Non-Default params passed to completion() {non_default_params}")
unsupported_params = {}
for k in non_default_params.keys():
if k not in supported_params:
if k == "n" and n == 1: # langchain sends n=1 as a default value
continue # skip this param
if k == "max_retries": # TODO: This is a patch. We support max retries for OpenAI, Azure. For non OpenAI LLMs we need to add support for max retries
continue # skip this param
# Always keep this in elif code blocks
else:
unsupported_params[k] = non_default_params[k]
if unsupported_params and not litellm.drop_params:
raise UnsupportedParamsError(status_code=500, message=f"{custom_llm_provider} does not support parameters: {unsupported_params}. To drop these, set `litellm.drop_params=True`.")
def _map_and_modify_arg(supported_params: dict, provider: str, model: str):
"""
filter params to fit the required provider format, drop those that don't fit if user sets `litellm.drop_params = True`.
"""
filtered_stop = None
if "stop" in supported_params and litellm.drop_params:
if provider == "bedrock" and "amazon" in model:
filtered_stop = []
if isinstance(stop, list):
for s in stop:
if re.match(r'^(\|+|User:)$', s):
filtered_stop.append(s)
if filtered_stop is not None:
supported_params["stop"] = filtered_stop
return supported_params
## raise exception if provider doesn't support passed in param
if custom_llm_provider == "anthropic":
## check if unsupported param passed in
supported_params = ["stream", "stop", "temperature", "top_p", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle anthropic params
if stream:
optional_params["stream"] = stream
if stop is not None:
if type(stop) == str:
stop = [stop] # openai can accept str/list for stop
optional_params["stop_sequences"] = stop
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
elif custom_llm_provider == "cohere":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "logit_bias", "top_p", "frequency_penalty", "presence_penalty", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if n is not None:
optional_params["num_generations"] = n
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "maritalk":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "presence_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
# handle maritalk params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if top_p is not None:
optional_params["p"] = top_p
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if stop is not None:
optional_params["stopping_tokens"] = stop
elif custom_llm_provider == "replicate":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "seed"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
return optional_params
if max_tokens is not None:
if "vicuna" in model or "flan" in model:
optional_params["max_length"] = max_tokens
elif "meta/codellama-13b" in model:
optional_params["max_tokens"] = max_tokens
else:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "huggingface":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
if temperature is not None:
if temperature == 0.0 or temperature == 0:
# hugging face exception raised when temp==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
temperature = 0.01
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["best_of"] = n
optional_params["do_sample"] = True # Need to sample if you want best of for hf inference endpoints
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
# HF TGI raises the following exception when max_new_tokens==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
if max_tokens == 0:
max_tokens = 1
optional_params["max_new_tokens"] = max_tokens
if n is not None:
optional_params["best_of"] = n
if presence_penalty is not None:
optional_params["repetition_penalty"] = presence_penalty
if "echo" in passed_params:
# https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
# Return the decoder input token logprobs and ids. You must set details=True as well for it to be taken into account. Defaults to False
optional_params["decoder_input_details"] = special_params["echo"]
passed_params.pop("echo", None) # since we handle translating echo, we should not send it to TGI request
elif custom_llm_provider == "together_ai":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream_tokens"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if frequency_penalty is not None:
optional_params["repetition_penalty"] = frequency_penalty # https://docs.together.ai/reference/inference
if stop is not None:
optional_params["stop"] = stop
elif custom_llm_provider == "ai21":
## check if unsupported param passed in
supported_params = ["stream", "n", "temperature", "max_tokens", "top_p", "stop", "frequency_penalty", "presence_penalty"]
_check_valid_arg(supported_params=supported_params)
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["numResults"] = n
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["topP"] = top_p
if stop is not None:
optional_params["stopSequences"] = stop
if frequency_penalty is not None:
optional_params["frequencyPenalty"] = {"scale": frequency_penalty}
if presence_penalty is not None:
optional_params["presencePenalty"] = {"scale": presence_penalty}
elif custom_llm_provider == "palm": # https://developers.generativeai.google/tutorials/curl_quickstart
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if n is not None:
optional_params["candidate_count"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif (
custom_llm_provider == "vertex_ai"
):
## check if unsupported param passed in
supported_params = ["temperature", "top_p", "max_tokens", "stream"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens is not None:
optional_params["max_output_tokens"] = max_tokens
elif custom_llm_provider == "sagemaker":
## check if unsupported param passed in
supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
_check_valid_arg(supported_params=supported_params)
# temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
if temperature is not None:
if temperature == 0.0 or temperature == 0:
# hugging face exception raised when temp==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `temperature` must be strictly positive
temperature = 0.01
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["best_of"] = n
optional_params["do_sample"] = True # Need to sample if you want best of for hf inference endpoints
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
# HF TGI raises the following exception when max_new_tokens==0
# Failed: Error occurred: HuggingfaceException - Input validation error: `max_new_tokens` must be strictly positive
if max_tokens == 0:
max_tokens = 1
optional_params["max_new_tokens"] = max_tokens
elif custom_llm_provider == "bedrock":
if "ai21" in model:
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
# https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
if max_tokens is not None:
optional_params["maxTokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "anthropic" in model:
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# anthropic params on bedrock
# \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
if max_tokens is not None:
optional_params["max_tokens_to_sample"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stop is not None:
optional_params["stop_sequences"] = stop
if stream:
optional_params["stream"] = stream
elif "amazon" in model: # amazon titan llms
supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["maxTokenCount"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if stop is not None:
filtered_stop = _map_and_modify_arg({"stop": stop}, provider="bedrock", model=model)
optional_params["stopSequences"] = filtered_stop["stop"]
if top_p is not None:
optional_params["topP"] = top_p
if stream:
optional_params["stream"] = stream
elif "meta" in model: # amazon / meta llms
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
if max_tokens is not None:
optional_params["max_gen_len"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif "cohere" in model: # cohere models on bedrock
supported_params = ["stream", "temperature", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
# handle cohere params
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
elif custom_llm_provider == "aleph_alpha":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["maximum_tokens"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["n"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "ollama":
supported_params = ["max_tokens", "stream", "top_p", "temperature", "frequency_penalty", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["num_predict"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if frequency_penalty is not None:
optional_params["repeat_penalty"] = frequency_penalty
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "nlp_cloud":
supported_params = ["max_tokens", "stream", "temperature", "top_p", "presence_penalty", "frequency_penalty", "n", "stop"]
_check_valid_arg(supported_params=supported_params)
if max_tokens is not None:
optional_params["max_length"] = max_tokens
if stream:
optional_params["stream"] = stream
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if n is not None:
optional_params["num_return_sequences"] = n
if stop is not None:
optional_params["stop_sequences"] = stop
elif custom_llm_provider == "petals":
supported_params = ["max_tokens", "temperature", "top_p", "stream"]
_check_valid_arg(supported_params=supported_params)
# max_new_tokens=1,temperature=0.9, top_p=0.6
if max_tokens is not None:
optional_params["max_new_tokens"] = max_tokens
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
elif custom_llm_provider == "deepinfra":
supported_params = ["temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if n:
optional_params["n"] = n
if stream:
optional_params["stream"] = stream
if stop:
optional_params["stop"] = stop
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias:
optional_params["logit_bias"] = logit_bias
if user:
optional_params["user"] = user
elif custom_llm_provider == "perplexity":
supported_params = ["temperature", "top_p", "stream", "max_tokens", "presence_penalty", "frequency_penalty"]
_check_valid_arg(supported_params=supported_params)
if temperature is not None:
if temperature == 0 and model == "mistral-7b-instruct": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
if presence_penalty:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty:
optional_params["frequency_penalty"] = frequency_penalty
elif custom_llm_provider == "anyscale":
supported_params = ["temperature", "top_p", "stream", "max_tokens"]
_check_valid_arg(supported_params=supported_params)
optional_params = non_default_params
if temperature is not None:
if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
temperature = 0.0001 # close to 0
optional_params["temperature"] = temperature
if top_p:
optional_params["top_p"] = top_p
if stream:
optional_params["stream"] = stream
if max_tokens:
optional_params["max_tokens"] = max_tokens
else: # assume passing in params for openai/azure openai
supported_params = ["functions", "function_call", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice", "max_retries"]
_check_valid_arg(supported_params=supported_params)
if functions is not None:
optional_params["functions"] = functions
if function_call is not None:
optional_params["function_call"] = function_call
if temperature is not None:
optional_params["temperature"] = temperature
if top_p is not None:
optional_params["top_p"] = top_p
if n is not None:
optional_params["n"] = n
if stream is not None:
optional_params["stream"] = stream
if stop is not None:
optional_params["stop"] = stop
if max_tokens is not None:
optional_params["max_tokens"] = max_tokens
if presence_penalty is not None:
optional_params["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
optional_params["frequency_penalty"] = frequency_penalty
if logit_bias is not None:
optional_params["logit_bias"] = logit_bias
if user is not None:
optional_params["user"] = user
if response_format is not None:
optional_params["response_format"] = response_format
if seed is not None:
optional_params["seed"] = seed
if tools is not None:
optional_params["tools"] = tools
if tool_choice is not None:
optional_params["tool_choice"] = tool_choice
if max_retries is not None:
optional_params["max_retries"] = max_retries
optional_params = non_default_params
# if user passed in non-default kwargs for specific providers/models, pass them along
for k in passed_params.keys():
if k not in default_params.keys():
optional_params[k] = passed_params[k]
return optional_params
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None):
try:
dynamic_api_key = None
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider, dynamic_api_key, api_base
if api_key and api_key.startswith("os.environ/"):
dynamic_api_key = get_secret(api_key)
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list and model.split("/",1)[0] not in litellm.model_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
if custom_llm_provider == "perplexity":
# perplexity is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.perplexity.ai
api_base = "https://api.perplexity.ai"
dynamic_api_key = get_secret("PERPLEXITYAI_API_KEY")
elif custom_llm_provider == "anyscale":
# anyscale is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
api_base = "https://api.endpoints.anyscale.com/v1"
dynamic_api_key = get_secret("ANYSCALE_API_KEY")
elif custom_llm_provider == "deepinfra":
                # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.deepinfra.com/v1/openai
api_base = "https://api.deepinfra.com/v1/openai"
dynamic_api_key = get_secret("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
# check if api base is a known openai compatible endpoint
if api_base:
for endpoint in litellm.openai_compatible_endpoints:
if endpoint in api_base:
if endpoint == "api.perplexity.ai":
custom_llm_provider = "perplexity"
dynamic_api_key = get_secret("PERPLEXITYAI_API_KEY")
elif endpoint == "api.endpoints.anyscale.com/v1":
custom_llm_provider = "anyscale"
dynamic_api_key = get_secret("ANYSCALE_API_KEY")
elif endpoint == "api.deepinfra.com/v1/openai":
custom_llm_provider = "deepinfra"
dynamic_api_key = get_secret("DEEPINFRA_API_KEY")
return model, custom_llm_provider, dynamic_api_key, api_base
        # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, etc.)
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or "ft:gpt-3.5-turbo" in model:
custom_llm_provider = "openai"
elif model in litellm.open_ai_text_completion_models:
custom_llm_provider = "text-completion-openai"
## anthropic
elif model in litellm.anthropic_models:
custom_llm_provider = "anthropic"
## cohere
elif model in litellm.cohere_models or model in litellm.cohere_embedding_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models or (":" in model and len(model)>64):
model_parts = model.split(":")
if len(model_parts) > 1 and len(model_parts[1])==64: ## checks if model name has a 64 digit code - e.g. "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
custom_llm_provider = "replicate"
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
        ## maritalk
elif model in litellm.maritalk_models:
custom_llm_provider = "maritalk"
## vertex - text + chat models
elif(
model in litellm.vertex_chat_models or
model in litellm.vertex_code_chat_models or
model in litellm.vertex_text_models or
model in litellm.vertex_code_text_models
):
custom_llm_provider = "vertex_ai"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
## nlp_cloud
elif model in litellm.nlp_cloud_models:
custom_llm_provider = "nlp_cloud"
## petals
elif model in litellm.petals_models:
custom_llm_provider = "petals"
## bedrock
elif model in litellm.bedrock_models or model in litellm.bedrock_embedding_models:
custom_llm_provider = "bedrock"
# openai embeddings
elif model in litellm.open_ai_embedding_models:
custom_llm_provider = "openai"
if custom_llm_provider is None or custom_llm_provider=="":
print() # noqa
print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m") # noqa
print() # noqa
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider, dynamic_api_key, api_base
except Exception as e:
raise e
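# Illustrative usage sketch (hedged example; "anthropic/claude-2" is an assumed model
# string and actual resolution depends on litellm.provider_list at runtime):
def _example_get_llm_provider():
    model, provider, dynamic_api_key, api_base = get_llm_provider(model="anthropic/claude-2")
    # expected: model == "claude-2", provider == "anthropic"; dynamic_api_key and api_base
    # stay None unless the provider is one of the openai-compatible endpoints handled above
    return model, provider, dynamic_api_key, api_base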
def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
api_key = (dynamic_api_key or litellm.api_key)
# openai
if llm_provider == "openai" or llm_provider == "text-completion-openai":
api_key = (
api_key or
litellm.openai_key or
get_secret("OPENAI_API_KEY")
)
# anthropic
elif llm_provider == "anthropic":
api_key = (
api_key or
litellm.anthropic_key or
get_secret("ANTHROPIC_API_KEY")
)
# ai21
elif llm_provider == "ai21":
api_key = (
api_key or
litellm.ai21_key or
get_secret("AI211_API_KEY")
)
# aleph_alpha
elif llm_provider == "aleph_alpha":
api_key = (
api_key or
litellm.aleph_alpha_key or
get_secret("ALEPH_ALPHA_API_KEY")
)
# baseten
elif llm_provider == "baseten":
api_key = (
api_key or
litellm.baseten_key or
get_secret("BASETEN_API_KEY")
)
# cohere
elif llm_provider == "cohere":
api_key = (
api_key or
litellm.cohere_key or
get_secret("COHERE_API_KEY")
)
# huggingface
elif llm_provider == "huggingface":
api_key = (
api_key or
litellm.huggingface_key or
get_secret("HUGGINGFACE_API_KEY")
)
# nlp_cloud
elif llm_provider == "nlp_cloud":
api_key = (
api_key or
litellm.nlp_cloud_key or
get_secret("NLP_CLOUD_API_KEY")
)
# replicate
elif llm_provider == "replicate":
api_key = (
api_key or
litellm.replicate_key or
get_secret("REPLICATE_API_KEY")
)
# together_ai
elif llm_provider == "together_ai":
api_key = (
api_key or
litellm.togetherai_api_key or
get_secret("TOGETHERAI_API_KEY") or
get_secret("TOGETHER_AI_TOKEN")
)
return api_key
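# Illustrative usage sketch (hedged example): get_api_key() falls back from the dynamic
# key to litellm.<provider>_key and then to the provider's environment variable. Wrapped
# in try/except since secret resolution depends on the runtime environment.
def _example_get_api_key():
    try:
        return get_api_key(llm_provider="anthropic", dynamic_api_key=None)
    except Exception:
        return None  # no key configured in litellm.anthropic_key or ANTHROPIC_API_KEY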
def get_max_tokens(model: str):
"""
Get the maximum number of tokens allowed for a given model.
Parameters:
model (str): The name of the model.
Returns:
int: The maximum number of tokens allowed for the given model.
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_max_tokens("gpt-4")
8192
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
if model in litellm.model_cost:
return litellm.model_cost[model]["max_tokens"]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return max_tokens
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def get_model_info(model: str):
"""
Get a dict for the maximum tokens (context window),
input_cost_per_token, output_cost_per_token for a given model.
Parameters:
model (str): The name of the model.
Returns:
dict: A dictionary containing the following information:
- max_tokens (int): The maximum number of tokens allowed for the given model.
- input_cost_per_token (float): The cost per token for input.
- output_cost_per_token (float): The cost per token for output.
- litellm_provider (str): The provider of the model (e.g., "openai").
- mode (str): The mode of the model (e.g., "chat" or "completion").
Raises:
Exception: If the model is not mapped yet.
Example:
>>> get_model_info("gpt-4")
{
"max_tokens": 8192,
"input_cost_per_token": 0.00003,
"output_cost_per_token": 0.00006,
"litellm_provider": "openai",
"mode": "chat"
}
"""
def _get_max_position_embeddings(model_name):
# Construct the URL for the config.json file
config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
try:
# Make the HTTP request to get the raw JSON file
response = requests.get(config_url)
response.raise_for_status() # Raise an exception for bad responses (4xx or 5xx)
# Parse the JSON response
config_json = response.json()
# Extract and return the max_position_embeddings
max_position_embeddings = config_json.get("max_position_embeddings")
if max_position_embeddings is not None:
return max_position_embeddings
else:
return None
except requests.exceptions.RequestException as e:
return None
try:
azure_llms = {
"gpt-35-turbo": "azure/gpt-3.5-turbo",
"gpt-35-turbo-16k": "azure/gpt-3.5-turbo-16k",
"gpt-35-turbo-instruct": "azure/gpt-3.5-turbo-instruct"
}
if model in azure_llms:
model = azure_llms[model]
if model in litellm.model_cost:
return litellm.model_cost[model]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if custom_llm_provider == "huggingface":
max_tokens = _get_max_position_embeddings(model_name=model)
return {
"max_tokens": max_tokens,
"input_cost_per_token": 0,
"output_cost_per_token": 0,
"litellm_provider": "huggingface",
"mode": "chat"
}
else:
raise Exception()
except:
raise Exception("This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json")
def json_schema_type(python_type_name: str):
"""Converts standard python types to json schema types
Parameters
----------
python_type_name : str
__name__ of type
Returns
-------
str
a standard JSON schema type, "string" if not recognized.
"""
python_to_json_schema_types = {
str.__name__: "string",
int.__name__: "integer",
float.__name__: "number",
bool.__name__: "boolean",
list.__name__: "array",
dict.__name__: "object",
"NoneType": "null",
}
return python_to_json_schema_types.get(python_type_name, "string")
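# Illustrative usage sketch (hedged example): json_schema_type() maps python type names to
# JSON schema types and falls back to "string" for anything it does not recognize.
def _example_json_schema_type():
    assert json_schema_type(int.__name__) == "integer"
    assert json_schema_type(list.__name__) == "array"
    assert json_schema_type("SomeUnknownType") == "string"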
def function_to_dict(input_function): # noqa: C901
"""Using type hints and numpy-styled docstring,
produce a dictionnary usable for OpenAI function calling
Parameters
----------
input_function : function
A function with a numpy-style docstring
Returns
-------
    dictionary
        A dictionary to add to the list passed to the `functions` parameter of `litellm.completion`
"""
# Get function name and docstring
try:
import inspect
from numpydoc.docscrape import NumpyDocString
from ast import literal_eval
except Exception as e:
raise e
name = input_function.__name__
docstring = inspect.getdoc(input_function)
numpydoc = NumpyDocString(docstring)
description = "\n".join([s.strip() for s in numpydoc["Summary"]])
# Get function parameters and their types from annotations and docstring
parameters = {}
required_params = []
param_info = inspect.signature(input_function).parameters
for param_name, param in param_info.items():
if hasattr(param, "annotation"):
param_type = json_schema_type(param.annotation.__name__)
else:
param_type = None
param_description = None
param_enum = None
# Try to extract param description from docstring using numpydoc
for param_data in numpydoc["Parameters"]:
if param_data.name == param_name:
if hasattr(param_data, "type"):
# replace type from docstring rather than annotation
param_type = param_data.type
if "optional" in param_type:
param_type = param_type.split(",")[0]
elif "{" in param_type:
# may represent a set of acceptable values
# translating as enum for function calling
try:
param_enum = str(list(literal_eval(param_type)))
param_type = "string"
except Exception:
pass
param_type = json_schema_type(param_type)
param_description = "\n".join([s.strip() for s in param_data.desc])
param_dict = {
"type": param_type,
"description": param_description,
"enum": param_enum,
}
parameters[param_name] = dict(
[(k, v) for k, v in param_dict.items() if isinstance(v, str)]
)
# Check if the parameter has no default value (i.e., it's required)
if param.default == param.empty:
required_params.append(param_name)
# Create the dictionary
result = {
"name": name,
"description": description,
"parameters": {
"type": "object",
"properties": parameters,
},
}
# Add "required" key if there are required parameters
if required_params:
result["parameters"]["required"] = required_params
return result
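# Illustrative usage sketch (hedged example): convert a function with a numpy-style
# docstring into an OpenAI function-calling schema. The sample function below is
# hypothetical; `numpydoc` must be installed for function_to_dict() to work, so failures
# are swallowed here.
def _example_function_to_dict():
    def get_weather(location: str, unit: str = "celsius"):
        """Get the current weather for a location.

        Parameters
        ----------
        location : str
            City name to look up.
        unit : str, optional
            Temperature unit to return.
        """
        return f"{location}: 20 {unit}"
    try:
        return function_to_dict(get_weather)  # dict with "name", "description", "parameters"
    except Exception:
        return None  # numpydoc missing or docstring not parseable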
def load_test_model(
model: str,
custom_llm_provider: str = "",
api_base: str = "",
prompt: str = "",
num_calls: int = 0,
force_timeout: int = 0,
):
test_prompt = "Hey, how's it going"
test_calls = 100
if prompt:
test_prompt = prompt
if num_calls:
test_calls = num_calls
messages = [[{"role": "user", "content": test_prompt}] for _ in range(test_calls)]
start_time = time.time()
try:
litellm.batch_completion(
model=model,
messages=messages,
custom_llm_provider=custom_llm_provider,
api_base=api_base,
force_timeout=force_timeout,
)
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "success",
"exception": None,
}
except Exception as e:
end_time = time.time()
response_time = end_time - start_time
return {
"total_response_time": response_time,
"calls_made": 100,
"status": "failed",
"exception": e,
}
def validate_environment(model: Optional[str]=None) -> dict:
"""
Checks if the environment variables are valid for the given model.
Args:
model (Optional[str]): The name of the model. Defaults to None.
Returns:
dict: A dictionary containing the following keys:
- keys_in_environment (bool): True if all the required keys are present in the environment, False otherwise.
- missing_keys (List[str]): A list of missing keys in the environment.
"""
keys_in_environment = False
missing_keys: List[str] = []
if model is None:
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
## EXTRACT LLM PROVIDER - if model name provided
try:
        custom_llm_provider = get_llm_provider(model=model)[1] # get_llm_provider returns (model, provider, dynamic_api_key, api_base)
except:
custom_llm_provider = None
# # check if llm provider part of model name
# if model.split("/",1)[0] in litellm.provider_list:
# custom_llm_provider = model.split("/", 1)[0]
# model = model.split("/", 1)[1]
# custom_llm_provider_passed_in = True
if custom_llm_provider:
if custom_llm_provider == "openai":
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
elif custom_llm_provider == "azure":
if ("AZURE_API_BASE" in os.environ
and "AZURE_API_VERSION" in os.environ
and "AZURE_API_KEY" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["AZURE_API_BASE", "AZURE_API_VERSION", "AZURE_API_KEY"])
elif custom_llm_provider == "anthropic":
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
elif custom_llm_provider == "cohere":
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
elif custom_llm_provider == "replicate":
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
elif custom_llm_provider == "openrouter":
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
elif custom_llm_provider == "vertex_ai":
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
else:
missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_PROJECT"])
elif custom_llm_provider == "huggingface":
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
elif custom_llm_provider == "ai21":
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
elif custom_llm_provider == "together_ai":
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
elif custom_llm_provider == "aleph_alpha":
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
elif custom_llm_provider == "baseten":
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
elif custom_llm_provider == "nlp_cloud":
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
elif custom_llm_provider == "bedrock":
if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AWS_ACCESS_KEY_ID")
missing_keys.append("AWS_SECRET_ACCESS_KEY")
else:
## openai - chatcompletion + text completion
        if model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models:
if "OPENAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENAI_API_KEY")
## anthropic
elif model in litellm.anthropic_models:
if "ANTHROPIC_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ANTHROPIC_API_KEY")
## cohere
elif model in litellm.cohere_models:
if "COHERE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("COHERE_API_KEY")
## replicate
elif model in litellm.replicate_models:
if "REPLICATE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("REPLICATE_API_KEY")
## openrouter
elif model in litellm.openrouter_models:
if "OPENROUTER_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("OPENROUTER_API_KEY")
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
if ("VERTEXAI_PROJECT" in os.environ
and "VERTEXAI_LOCATION" in os.environ):
keys_in_environment = True
            else:
                missing_keys.extend(["VERTEXAI_PROJECT", "VERTEXAI_LOCATION"])
## huggingface
elif model in litellm.huggingface_models:
if "HUGGINGFACE_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("HUGGINGFACE_API_KEY")
## ai21
elif model in litellm.ai21_models:
if "AI21_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("AI21_API_KEY")
## together_ai
elif model in litellm.together_ai_models:
if "TOGETHERAI_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("TOGETHERAI_API_KEY")
## aleph_alpha
elif model in litellm.aleph_alpha_models:
if "ALEPH_ALPHA_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("ALEPH_ALPHA_API_KEY")
## baseten
elif model in litellm.baseten_models:
if "BASETEN_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("BASETEN_API_KEY")
## nlp_cloud
elif model in litellm.nlp_cloud_models:
if "NLP_CLOUD_API_KEY" in os.environ:
keys_in_environment = True
else:
missing_keys.append("NLP_CLOUD_API_KEY")
return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
if callback == "sentry":
try:
import sentry_sdk
except ImportError:
print_verbose("Package 'sentry_sdk' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "sentry_sdk"]
)
import sentry_sdk
sentry_sdk_instance = sentry_sdk
sentry_trace_rate = (
os.environ.get("SENTRY_API_TRACE_RATE")
if "SENTRY_API_TRACE_RATE" in os.environ
else "1.0"
)
sentry_sdk_instance.init(
dsn=os.environ.get("SENTRY_DSN"),
traces_sample_rate=float(sentry_trace_rate),
)
capture_exception = sentry_sdk_instance.capture_exception
add_breadcrumb = sentry_sdk_instance.add_breadcrumb
elif callback == "posthog":
try:
from posthog import Posthog
except ImportError:
print_verbose("Package 'posthog' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "posthog"]
)
from posthog import Posthog
posthog = Posthog(
project_api_key=os.environ.get("POSTHOG_API_KEY"),
host=os.environ.get("POSTHOG_API_URL"),
)
elif callback == "slack":
try:
from slack_bolt import App
except ImportError:
print_verbose("Package 'slack_bolt' is missing. Installing it...")
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "slack_bolt"]
)
from slack_bolt import App
slack_app = App(
token=os.environ.get("SLACK_API_TOKEN"),
signing_secret=os.environ.get("SLACK_API_SECRET"),
)
alerts_channel = os.environ["SLACK_API_CHANNEL"]
print_verbose(f"Initialized Slack App: {slack_app}")
elif callback == "traceloop":
traceloopLogger = TraceloopLogger()
elif callback == "helicone":
heliconeLogger = HeliconeLogger()
elif callback == "llmonitor":
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "wandb":
weightsBiasesLogger = WeightsBiasesLogger()
elif callback == "langsmith":
langsmithLogger = LangsmithLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
berrispendLogger = BerriSpendLogger()
elif callback == "supabase":
print_verbose(f"instantiating supabase")
supabaseClient = Supabase()
elif callback == "lite_debugger":
print_verbose(f"instantiating lite_debugger")
if function_id:
liteDebuggerClient = LiteDebugger(email=function_id)
elif litellm.token:
liteDebuggerClient = LiteDebugger(email=litellm.token)
elif litellm.email:
liteDebuggerClient = LiteDebugger(email=litellm.email)
else:
liteDebuggerClient = LiteDebugger(email=str(uuid.uuid4()))
elif callable(callback):
customLogger = CustomLogger()
except Exception as e:
raise e
# NOTE: DEPRECATING this in favor of using failure_handler() in Logging:
def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
# print_verbose(f"handle_failure args: {args}")
# print_verbose(f"handle_failure kwargs: {kwargs}")
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"failed_event_name", "litellm.failed_query"
)
print_verbose(f"self.failure_callback: {litellm.failure_callback}")
for callback in litellm.failure_callback:
try:
if callback == "slack":
slack_msg = ""
if len(kwargs) > 0:
for key in kwargs:
slack_msg += f"{key}: {kwargs[key]}\n"
if len(args) > 0:
for i, arg in enumerate(args):
slack_msg += f"LiteLLM_Args_{str(i)}: {arg}"
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_msg += f"Traceback: {traceback_exception}"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "sentry":
capture_exception(exception)
elif callback == "posthog":
print_verbose(
f"inside posthog, additional_details: {len(additional_details.keys())}"
)
ph_obj = {}
if len(kwargs) > 0:
ph_obj = kwargs
if len(args) > 0:
for i, arg in enumerate(args):
ph_obj["litellm_args_" + str(i)] = arg
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
print_verbose(f"ph_obj: {ph_obj}")
print_verbose(f"PostHog Event Name: {event_name}")
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name)
print_verbose(f"successfully logged to PostHog!")
elif callback == "berrispend":
print_verbose("reaches berrispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
berrispendLogger.log_event(
model=model,
messages=messages,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "supabase":
print_verbose("reaches supabase for logging!")
print_verbose(f"supabaseClient: {supabaseClient}")
model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs["messages"]
result = {
"model": model,
"created": time.time(),
"error": traceback_exception,
"usage": {
"prompt_tokens": prompt_token_calculator(
model, messages=messages
),
"completion_tokens": 0,
},
}
supabaseClient.log_event(
model=model,
messages=messages,
end_user=kwargs.get("user", "default"),
response_obj=result,
start_time=start_time,
end_time=end_time,
litellm_call_id=kwargs["litellm_call_id"],
print_verbose=print_verbose,
)
except:
print_verbose(
f"Error Occurred while logging failure: {traceback.format_exc()}"
)
pass
if failure_handler and callable(failure_handler):
call_details = {
"exception": exception,
"additional_details": additional_details,
}
failure_handler(call_details)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
pass
async def convert_to_streaming_response_async(response_object: Optional[dict]=None):
"""
Asynchronously converts a response object to a streaming response.
Args:
response_object (Optional[dict]): The response object to be converted. Defaults to None.
Raises:
Exception: If the response object is None.
Yields:
ModelResponse: The converted streaming response object.
Returns:
None
"""
if response_object is None:
raise Exception("Error in response object format")
model_response_object = ModelResponse(stream=True)
if model_response_object is None:
raise Exception("Error in response creating model response object")
choice_list = []
for idx, choice in enumerate(response_object["choices"]):
delta = Delta(
content=choice["message"].get("content", None),
role=choice["message"]["role"],
function_call=choice["message"].get("function_call", None),
tool_calls=choice["message"].get("tool_calls", None)
)
finish_reason = choice.get("finish_reason", None)
if finish_reason is None:
finish_reason = choice.get("finish_details")
choice = StreamingChoices(finish_reason=finish_reason, index=idx, delta=delta)
choice_list.append(choice)
model_response_object.choices = choice_list
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage = Usage(
completion_tokens=response_object["usage"].get("completion_tokens", 0),
prompt_tokens=response_object["usage"].get("prompt_tokens", 0),
total_tokens=response_object["usage"].get("total_tokens", 0)
)
if "id" in response_object:
model_response_object.id = response_object["id"]
if "system_fingerprint" in response_object:
model_response_object.system_fingerprint = response_object["system_fingerprint"]
if "model" in response_object:
model_response_object.model = response_object["model"]
yield model_response_object
await asyncio.sleep(0)
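# Illustrative usage sketch (hedged example): drain the async generator above with a
# minimal, hand-built OpenAI-style response dict; the field values are assumptions.
async def _example_convert_to_streaming_response_async():
    cached_response = {
        "id": "chatcmpl-example",
        "model": "gpt-3.5-turbo",
        "choices": [
            {"message": {"role": "assistant", "content": "Hello!"}, "finish_reason": "stop"}
        ],
        "usage": {"prompt_tokens": 5, "completion_tokens": 2, "total_tokens": 7},
    }
    chunks = []
    async for chunk in convert_to_streaming_response_async(cached_response):
        chunks.append(chunk)  # each chunk is a ModelResponse(stream=True) with Delta choices
    return chunks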
def convert_to_streaming_response(response_object: Optional[dict]=None):
# used for yielding Cache hits when stream == True
if response_object is None:
raise Exception("Error in response object format")
model_response_object = ModelResponse(stream=True)
choice_list=[]
for idx, choice in enumerate(response_object["choices"]):
delta = Delta(
content=choice["message"].get("content", None),
role=choice["message"]["role"],
function_call=choice["message"].get("function_call", None),
tool_calls=choice["message"].get("tool_calls", None)
)
finish_reason = choice.get("finish_reason", None)
if finish_reason == None:
# gpt-4 vision can return 'finish_reason' or 'finish_details'
finish_reason = choice.get("finish_details")
choice = StreamingChoices(finish_reason=finish_reason, index=idx, delta=delta)
choice_list.append(choice)
model_response_object.choices = choice_list
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
if "id" in response_object:
model_response_object.id = response_object["id"]
if "system_fingerprint" in response_object:
model_response_object.system_fingerprint = response_object["system_fingerprint"]
if "model" in response_object:
model_response_object.model = response_object["model"]
yield model_response_object
def convert_to_model_response_object(response_object: Optional[dict]=None, model_response_object: Optional[Union[ModelResponse, EmbeddingResponse]]=None, response_type: Literal["completion", "embedding"] = "completion", stream = False):
try:
if response_type == "completion" and (model_response_object is None or isinstance(model_response_object, ModelResponse)):
if response_object is None or model_response_object is None:
raise Exception("Error in response object format")
if stream == True:
# for returning cached responses, we need to yield a generator
return convert_to_streaming_response(
response_object=response_object
)
choice_list=[]
for idx, choice in enumerate(response_object["choices"]):
message = Message(
content=choice["message"].get("content", None),
role=choice["message"]["role"],
function_call=choice["message"].get("function_call", None),
tool_calls=choice["message"].get("tool_calls", None)
)
finish_reason = choice.get("finish_reason", None)
if finish_reason == None:
# gpt-4 vision can return 'finish_reason' or 'finish_details'
finish_reason = choice.get("finish_details")
choice = Choices(finish_reason=finish_reason, index=idx, message=message)
choice_list.append(choice)
model_response_object.choices = choice_list
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
if "id" in response_object:
model_response_object.id = response_object["id"]
if "system_fingerprint" in response_object:
model_response_object.system_fingerprint = response_object["system_fingerprint"]
if "model" in response_object:
model_response_object.model = response_object["model"]
return model_response_object
elif response_type == "embedding" and (model_response_object is None or isinstance(model_response_object, EmbeddingResponse)):
if response_object is None:
raise Exception("Error in response object format")
if model_response_object is None:
model_response_object = EmbeddingResponse()
if "model" in response_object:
model_response_object.model = response_object["model"]
if "object" in response_object:
model_response_object.object = response_object["object"]
model_response_object.data = response_object["data"]
if "usage" in response_object and response_object["usage"] is not None:
model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0) # type: ignore
model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0) # type: ignore
model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0) # type: ignore
return model_response_object
except Exception as e:
raise Exception(f"Invalid response object {e}")
# NOTE: DEPRECATING this in favor of using success_handler() in Logging:
def handle_success(args, kwargs, result, start_time, end_time):
global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
try:
model = args[0] if len(args) > 0 else kwargs["model"]
input = (
args[1]
if len(args) > 1
else kwargs.get("messages", kwargs.get("input", None))
)
success_handler = additional_details.pop("success_handler", None)
failure_handler = additional_details.pop("failure_handler", None)
additional_details["Event_Name"] = additional_details.pop(
"successful_event_name", "litellm.succes_query"
)
for callback in litellm.success_callback:
try:
if callback == "posthog":
ph_obj = {}
for detail in additional_details:
ph_obj[detail] = additional_details[detail]
event_name = additional_details["Event_Name"]
if "user_id" in additional_details:
posthog.capture(
additional_details["user_id"], event_name, ph_obj
)
else: # PostHog calls require a unique id to identify a user - https://posthog.com/docs/libraries/python
unique_id = str(uuid.uuid4())
posthog.capture(unique_id, event_name, ph_obj)
pass
elif callback == "slack":
slack_msg = ""
for detail in additional_details:
slack_msg += f"{detail}: {additional_details[detail]}\n"
slack_app.client.chat_postMessage(
channel=alerts_channel, text=slack_msg
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]
aispendLogger.log_event(
model=model,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
if success_handler and callable(success_handler):
success_handler(args, kwargs)
pass
except Exception as e:
# LOGGING
exception_logging(logger_fn=user_logger_fn, exception=e)
print_verbose(
f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}"
)
pass
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
def prompt_token_calculator(model, messages):
# use tiktoken or anthropic's tokenizer depending on the model
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
try:
import anthropic
except:
Exception("Anthropic import failed please run `pip install anthropic`")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:
num_tokens = len(encoding.encode(text))
return num_tokens
def valid_model(model):
try:
# for a given model name, check if the user has the right permissions to access the model
if (
model in litellm.open_ai_chat_completion_models
or model in litellm.open_ai_text_completion_models
):
openai.Model.retrieve(model)
else:
messages = [{"role": "user", "content": "Hello World"}]
litellm.completion(model=model, messages=messages)
except:
raise BadRequestError(message="", model=model, llm_provider="")
def check_valid_key(model: str, api_key: str):
"""
Checks if a given API key is valid for a specific model by making a litellm.completion call with max_tokens=10
Args:
model (str): The name of the model to check the API key against.
api_key (str): The API key to be checked.
Returns:
bool: True if the API key is valid for the model, False otherwise.
"""
messages = [{"role": "user", "content": "Hey, how's it going?"}]
try:
litellm.completion(model=model, messages=messages, api_key=api_key, max_tokens=10)
return True
except AuthenticationError as e:
return False
except Exception as e:
return False
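# Illustrative usage sketch (hedged example): verify a key with the 10-token test
# completion above. This performs a live API call; the environment variable used here is
# an assumption for the example.
def _example_check_valid_key(model_name: str = "gpt-3.5-turbo"):
    candidate_key = os.environ.get("OPENAI_API_KEY", "")
    return check_valid_key(model=model_name, api_key=candidate_key)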
def _should_retry(status_code: int):
"""
Reimplementation of openai's should retry logic, since that one can't be imported.
https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L639
"""
# If the server explicitly says whether or not to retry, obey.
# Retry on request timeouts.
if status_code == 408:
return True
# Retry on lock timeouts.
if status_code == 409:
return True
# Retry on rate limits.
if status_code == 429:
return True
# Retry internal errors.
if status_code >= 500:
return True
return False
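# Illustrative usage sketch (hedged example): the retry policy above treats 408/409/429
# and any 5xx as retryable, other 4xx as non-retryable.
def _example_should_retry():
    retryable = [code for code in (408, 409, 429, 500, 503) if _should_retry(code)]
    non_retryable = [code for code in (400, 401, 404) if not _should_retry(code)]
    return retryable, non_retryable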
def _calculate_retry_after(remaining_retries: int, max_retries: int, response_headers: Optional[httpx.Headers]=None, min_timeout: int = 0):
"""
Reimplementation of openai's calculate retry after, since that one can't be imported.
https://github.com/openai/openai-python/blob/af67cfab4210d8e497c05390ce14f39105c77519/src/openai/_base_client.py#L631
"""
try:
import email # openai import
        # About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
        # The header value may be an integer number of seconds or an <http-date>.
        # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax for details.
if response_headers is not None:
retry_header = response_headers.get("retry-after")
try:
retry_after = int(retry_header)
except Exception:
retry_date_tuple = email.utils.parsedate_tz(retry_header)
if retry_date_tuple is None:
retry_after = -1
else:
retry_date = email.utils.mktime_tz(retry_date_tuple)
retry_after = int(retry_date - time.time())
else:
retry_after = -1
except Exception:
retry_after = -1
# If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
if 0 < retry_after <= 60:
return retry_after
initial_retry_delay = 0.5
max_retry_delay = 8.0
nb_retries = max_retries - remaining_retries
# Apply exponential backoff, but not more than the max.
sleep_seconds = min(initial_retry_delay * pow(2.0, nb_retries), max_retry_delay)
# Apply some jitter, plus-or-minus half a second.
jitter = 1 - 0.25 * random.random()
timeout = sleep_seconds * jitter
return timeout if timeout >= min_timeout else min_timeout
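# Illustrative usage sketch (hedged example): backoff schedule when no Retry-After header
# is available -- 0.5s doubling up to the 8s cap, each value scaled down by up to 25% jitter.
def _example_calculate_retry_after(max_retries: int = 5):
    return [
        _calculate_retry_after(remaining_retries=r, max_retries=max_retries, response_headers=None)
        for r in range(max_retries, 0, -1)
    ]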
# integration helper function
def modify_integration(integration_name, integration_params):
global supabaseClient
if integration_name == "supabase":
if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"]
# custom prompt helper function
def register_prompt_template(model: str, roles: dict, initial_prompt_value: str = "", final_prompt_value: str = ""):
"""
Register a prompt template to follow your custom format for a given model
Args:
model (str): The name of the model.
roles (dict): A dictionary mapping roles to their respective prompt values.
initial_prompt_value (str, optional): The initial prompt value. Defaults to "".
final_prompt_value (str, optional): The final prompt value. Defaults to "".
Returns:
dict: The updated custom prompt dictionary.
Example usage:
```
import litellm
litellm.register_prompt_template(
model="llama-2",
initial_prompt_value="You are a good assistant" # [OPTIONAL]
roles={
"system": {
"pre_message": "[INST] <<SYS>>\n", # [OPTIONAL]
"post_message": "\n<</SYS>>\n [/INST]\n" # [OPTIONAL]
},
"user": {
"pre_message": "[INST] ", # [OPTIONAL]
"post_message": " [/INST]" # [OPTIONAL]
},
"assistant": {
"pre_message": "\n" # [OPTIONAL]
"post_message": "\n" # [OPTIONAL]
}
        },
final_prompt_value="Now answer as best you can:" # [OPTIONAL]
)
```
"""
model = get_llm_provider(model=model)[0]
litellm.custom_prompt_dict[model] = {
"roles": roles,
"initial_prompt_value": initial_prompt_value,
"final_prompt_value": final_prompt_value
}
return litellm.custom_prompt_dict
####### DEPRECATED ################
def get_all_keys(llm_provider=None):
try:
global last_fetched_at_keys
# if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes
print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}")
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
time_delta = 0
if last_fetched_at_keys != None:
current_time = time.time()
time_delta = current_time - last_fetched_at_keys
if (
time_delta > 300 or last_fetched_at_keys == None or llm_provider
): # if the llm provider is passed in , assume this happening due to an AuthError for that provider
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_all_keys",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get model key response: {response.text}")
data = response.json()
# update model list
for key, value in data[
"model_keys"
].items(): # follows the LITELLM API KEY format - <UPPERCASE_PROVIDER_NAME>_API_KEY - e.g. HUGGINGFACE_API_KEY
os.environ[key] = value
# set model alias map
for model_alias, value in data["model_alias_map"].items():
litellm.model_alias_map[model_alias] = value
return "it worked!"
return None
return None
except:
print_verbose(
f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
)
pass
def get_model_list():
global last_fetched_at, print_verbose
try:
# if user is using hosted product -> get their updated model list
user_email = (
os.getenv("LITELLM_EMAIL")
or litellm.email
or litellm.token
or os.getenv("LITELLM_TOKEN")
)
if user_email:
# make the api call
last_fetched_at = time.time()
print_verbose(f"last_fetched_at: {last_fetched_at}")
response = requests.post(
url="http://api.litellm.ai/get_model_list",
headers={"content-type": "application/json"},
data=json.dumps({"user_email": user_email}),
)
print_verbose(f"get_model_list response: {response.text}")
data = response.json()
# update model list
model_list = data["model_list"]
# # check if all model providers are in environment
# model_providers = data["model_providers"]
# missing_llm_provider = None
# for item in model_providers:
# if f"{item.upper()}_API_KEY" not in os.environ:
# missing_llm_provider = item
# break
# # update environment - if required
# threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start()
return model_list
return [] # return empty list by default
except:
print_verbose(
f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
)
####### EXCEPTION MAPPING ################
def exception_type(
model,
original_exception,
custom_llm_provider,
completion_kwargs={},
):
global user_logger_fn, liteDebuggerClient
exception_mapping_worked = False
if litellm.suppress_debug_info is False:
print() # noqa
print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m") # noqa
print("LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True'.") # noqa
print() # noqa
try:
if model:
error_str = str(original_exception)
if isinstance(original_exception, BaseException):
exception_type = type(original_exception).__name__
else:
exception_type = ""
if "Request Timeout Error" in error_str or "Request timed out" in error_str:
exception_mapping_worked = True
raise Timeout(
message=f"APITimeoutError - Request timed out",
model=model,
llm_provider=custom_llm_provider
)
if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai":
if "This model's maximum context length is" in error_str or "Request too large" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str and "Incorrect API key provided" not in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
response=original_exception.response
)
elif original_exception.status_code == 504: # gateway timeout error
exception_mapping_worked = True
raise Timeout(
message=f"OpenAIException - {original_exception.message}",
model=model,
llm_provider="openai",
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"OpenAIException - {original_exception.message}",
llm_provider="openai",
model=model,
request=original_exception.request
)
else:
# if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
raise APIConnectionError(
__cause__=original_exception.__cause__,
llm_provider=custom_llm_provider,
model=model,
request=original_exception.request
)
elif custom_llm_provider == "anthropic": # one of the anthropics
if hasattr(original_exception, "message"):
if "prompt is too long" in original_exception.message or "prompt: length" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if "Invalid API Key" in original_exception.message:
exception_mapping_worked = True
raise AuthenticationError(
message=original_exception.message,
model=model,
llm_provider="anthropic",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
print_verbose(f"status_code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AnthropicException - {original_exception.message}",
model=model,
llm_provider="anthropic",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AnthropicException - {original_exception.message}",
llm_provider="anthropic",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "replicate":
if "Incorrect authentication token" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif "input is too long" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif exception_type == "ModelError":
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400 or original_exception.status_code == 422 or original_exception.status_code == 413:
exception_mapping_worked = True
raise BadRequestError(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response
)
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"ReplicateException - {str(original_exception)}",
llm_provider="replicate",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "bedrock":
if "too many tokens" in error_str or "expected maxLength:" in error_str or "Input is too long" in error_str or "Too many input tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"BedrockException: Context Window Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Malformed input request" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "Unable to locate credentials" in error_str or "The security token included in the request is invalid" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException Invalid Authentication - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if "throttlingException" in error_str or "ThrottlingException" in error_str:
exception_mapping_worked = True
raise RateLimitError(
message=f"BedrockException: Rate Limit Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "sagemaker":
if "Unable to locate credentials" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker",
response=original_exception.response
)
elif "Input validation error: `best_of` must be > 0 and <= 2" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
model=model,
llm_provider="sagemaker",
response=original_exception.response
)
elif custom_llm_provider == "vertex_ai":
if "Vertex AI API has not been used in project" in error_str or "Unable to find your project" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
elif "403" in error_str:
exception_mapping_worked = True
raise AuthenticationError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"VertexAIException - {error_str}",
model=model,
llm_provider="vertex_ai",
response=original_exception.response
)
if original_exception.status_code == 500:
exception_mapping_worked = True
raise APIError(
message=f"VertexAIException - {error_str}",
status_code=500,
model=model,
llm_provider="vertex_ai",
request=original_exception.request
)
elif custom_llm_provider == "palm":
if "503 Getting metadata" in error_str:
# auth errors look like this
# 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate.
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - Invalid api key",
model=model,
llm_provider="palm",
response=original_exception.response
)
if "400 Request payload size exceeds" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"PalmException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response
)
# Failed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
elif custom_llm_provider == "cohere": # Cohere
if (
"invalid api token" in error_str
or "No API key provided." in error_str
):
exception_mapping_worked = True
raise AuthenticationError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 400 or original_exception.status_code == 498:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif (
"CohereConnectionError" in exception_type
): # cohere seems to fire these errors when we load test it (1k+ messages / min)
exception_mapping_worked = True
raise RateLimitError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response
)
else:
if hasattr(original_exception, "status_code"):
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
request=original_exception.request
)
raise original_exception
elif custom_llm_provider == "huggingface":
if "length limit exceeded" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=error_str,
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=error_str,
llm_provider="huggingface",
model=model,
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "ai21":
if hasattr(original_exception, "message"):
if "Prompt has too many tokens" in original_exception.message:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "nlp_cloud":
if "detail" in error_str:
if "Input text length should not exceed" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=500,
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
if hasattr(original_exception, "status_code"): # https://docs.nlpcloud.com/?shell#errors
if original_exception.status_code == 400 or original_exception.status_code == 406 or original_exception.status_code == 413 or original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 401 or original_exception.status_code == 403:
exception_mapping_worked = True
raise AuthenticationError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 522 or original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
request=original_exception.request
)
elif original_exception.status_code == 429 or original_exception.status_code == 402:
exception_mapping_worked = True
raise RateLimitError(
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500 or original_exception.status_code == 503:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif original_exception.status_code == 504 or original_exception.status_code == 520:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "together_ai":
import json
try:
error_response = json.loads(error_str)
except:
error_response = {"error": error_str}
if "error" in error_response and "`inputs` tokens + `max_new_tokens` must be <=" in error_response["error"]:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "invalid private key" in error_response["error"]:
exception_mapping_worked = True
raise AuthenticationError(
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif "error" in error_response and "INVALID_ARGUMENT" in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error" in error_response and "API key doesn't match expected format." in error_response["error"]:
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif "error_type" in error_response and error_response["error_type"] == "validation":
exception_mapping_worked = True
raise BadRequestError(
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
model=model,
llm_provider="together_ai",
request=original_exception.request
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 524:
exception_mapping_worked = True
raise Timeout(
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "aleph_alpha":
if "This is longer than the model's maximum context length" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
print_verbose(f"status code: {original_exception.status_code}")
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response
)
raise original_exception
raise original_exception
elif custom_llm_provider == "ollama":
if "no attribute 'async_get_ollama_response_stream" in error_str:
exception_mapping_worked = True
raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
if isinstance(original_exception, dict):
error_str = original_exception.get("error", "")
else:
error_str = str(original_exception)
if "no such file or directory" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}",
model=model,
llm_provider="ollama",
response=original_exception.response
)
elif "Failed to establish a new connection" in error_str:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif "Invalid response object from API" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response
)
elif custom_llm_provider == "vllm":
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 0:
exception_mapping_worked = True
raise APIConnectionError(
message=f"VLLMException - {original_exception.message}",
llm_provider="vllm",
model=model,
request=original_exception.request
)
elif custom_llm_provider == "azure":
if "This model's maximum context length is" in error_str:
exception_mapping_worked = True
raise ContextWindowExceededError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif "invalid_request_error" in error_str:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif hasattr(original_exception, "status_code"):
exception_mapping_worked = True
if original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
response=original_exception.response
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
request=original_exception.request
)
if original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"AzureException - {original_exception.message}",
model=model,
llm_provider="azure",
response=original_exception.response
)
else:
exception_mapping_worked = True
raise APIError(
status_code=original_exception.status_code,
message=f"AzureException - {original_exception.message}",
llm_provider="azure",
model=model,
request=original_exception.request
)
else:
# if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
raise APIConnectionError(
__cause__=original_exception.__cause__,
llm_provider="azure",
model=model,
request=original_exception.request
)
if "BadRequestError.__init__() missing 1 required positional argument: 'param'" in str(original_exception): # deal with edge-case invalid request error bug in openai-python sdk
exception_mapping_worked = True
raise BadRequestError(
message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response
)
else: # ensure generic errors always return APIConnectionError
exception_mapping_worked = True
if hasattr(original_exception, "request"):
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request=original_exception.request
)
else:
raise APIConnectionError(
message=f"{str(original_exception)}",
llm_provider=custom_llm_provider,
model=model,
request= httpx.Request(method="POST", url="https://api.openai.com/v1/") # stub the request
)
except Exception as e:
# LOGGING
exception_logging(
logger_fn=user_logger_fn,
additional_args={
"exception_mapping_worked": exception_mapping_worked,
"original_exception": original_exception,
},
exception=e,
)
## AUTH ERROR
if isinstance(e, AuthenticationError) and (
litellm.email or "LITELLM_EMAIL" in os.environ
):
threading.Thread(target=get_all_keys, args=(e.llm_provider,)).start()
# don't let an error with mapping interrupt the user from receiving an error from the llm api calls
if exception_mapping_worked:
raise e
else:
raise original_exception
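# A minimal usage sketch (hypothetical function name, not called anywhere in this module): shows how
# `exception_type` re-raises a provider error as the corresponding LiteLLM exception. The fake
# exception text below is an assumption chosen to hit the generic timeout branch above.
def _example_exception_mapping():
    try:
        exception_type(
            model="gpt-3.5-turbo",
            original_exception=Exception("Request timed out"),
            custom_llm_provider="openai",
        )
    except Timeout as e:
        # "Request timed out" is matched before the provider-specific branches and surfaces as litellm's Timeout
        return e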
####### CRASH REPORTING ################
def safe_crash_reporting(model=None, exception=None, custom_llm_provider=None):
data = {
"model": model,
"exception": str(exception),
"custom_llm_provider": custom_llm_provider,
}
executor.submit(litellm_telemetry, data)
# threading.Thread(target=litellm_telemetry, args=(data,), daemon=True).start()
def get_or_generate_uuid():
temp_dir = os.path.join(os.path.abspath(os.sep), "tmp")
uuid_file = os.path.join(temp_dir, "litellm_uuid.txt")
try:
# Try to open the file and load the UUID
with open(uuid_file, "r") as file:
uuid_value = file.read()
if uuid_value:
uuid_value = uuid_value.strip()
else:
raise FileNotFoundError
except FileNotFoundError:
# Generate a new UUID if the file doesn't exist or is empty
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open(uuid_file, "w") as file:
file.write(uuid_value)
except: # if writing to tmp/litellm_uuid.txt fails, retry writing litellm_uuid.txt to the current directory
try:
new_uuid = uuid.uuid4()
uuid_value = str(new_uuid)
with open("litellm_uuid.txt", "w") as file:
file.write(uuid_value)
except: # if this 3rd attempt fails just pass
# Good first issue for someone to improve this function :)
return
except:
# [Non-Blocking Error]
return
return uuid_value
def litellm_telemetry(data):
# Load or generate the UUID
uuid_value = ""
try:
uuid_value = get_or_generate_uuid()
except:
uuid_value = str(uuid.uuid4())
try:
# Prepare the data to send to litellm logging api
try:
pkg_version = importlib.metadata.version("litellm")
except:
pkg_version = None
if "model" not in data:
data["model"] = None
payload = {
"uuid": uuid_value,
"data": data,
"version:": pkg_version
}
# Make the POST request to litellm logging api
response = requests.post(
"https://litellm-logging.onrender.com/logging",
headers={"Content-Type": "application/json"},
json=payload,
)
response.raise_for_status() # Raise an exception for HTTP errors
except:
# [Non-Blocking Error]
return
######### Secret Manager ############################
# checks if user has passed in a secret manager client
# if passed in then checks the secret there
def get_secret(secret_name: str, default_value: Optional[str]=None):
if secret_name.startswith("os.environ/"):
secret_name = secret_name.replace("os.environ/", "")
try:
if litellm.secret_manager_client is not None:
try:
client = litellm.secret_manager_client
if type(client).__module__ + '.' + type(client).__name__ == 'azure.keyvault.secrets._client.SecretClient': # support Azure Secret Client - from azure.keyvault.secrets import SecretClient
secret = client.get_secret(secret_name).value
else: # assume the default is an Infisical client
secret = client.get_secret(secret_name).secret_value
except: # check if it's in os.environ
secret = os.environ.get(secret_name)
return secret
else:
return os.environ.get(secret_name)
except Exception as e:
if default_value is not None:
return default_value
else:
raise e
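# A minimal usage sketch (hypothetical env var name, not called by the library): demonstrates the
# "os.environ/" prefix handling above. Without a configured secret_manager_client, the stripped
# name is simply looked up in os.environ; `default_value` is only returned if the lookup raises.
def _example_get_secret_usage():
    os.environ["LITELLM_EXAMPLE_SECRET"] = "sk-example"
    value = get_secret("os.environ/LITELLM_EXAMPLE_SECRET")  # -> "sk-example" (prefix stripped, env var read)
    return value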
######## Streaming Class ############################
# wraps the completion stream to return the correct format for the model
# replicate/anthropic/cohere
class CustomStreamWrapper:
def __init__(self, completion_stream, model, custom_llm_provider=None, logging_obj=None):
self.model = model
self.custom_llm_provider = custom_llm_provider
self.logging_obj = logging_obj
self.completion_stream = completion_stream
self.sent_first_chunk = False
self.sent_last_chunk = False
self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
self.holding_chunk = ""
self.complete_response = ""
def __iter__(self):
return self
def __aiter__(self):
return self
def process_chunk(self, chunk: str):
"""
NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta.
"""
try:
chunk = chunk.strip()
self.complete_response = self.complete_response.strip()
if chunk.startswith(self.complete_response):
# Remove last_sent_chunk only if it appears at the start of the new chunk
chunk = chunk[len(self.complete_response):]
self.complete_response += chunk
return chunk
except Exception as e:
raise e
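# Illustrative walk-through (comments only): if self.complete_response is "Hello" and NLP Cloud
# sends the cumulative chunk "Hello wor", process_chunk strips the already-seen prefix and
# returns just " wor", while complete_response becomes "Hello wor".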
def check_special_tokens(self, chunk: str, finish_reason: Optional[str]):
hold = False
if finish_reason:
for token in self.special_tokens:
if token in chunk:
chunk = chunk.replace(token, "")
return hold, chunk
if self.sent_first_chunk is True:
return hold, chunk
curr_chunk = self.holding_chunk + chunk
curr_chunk = curr_chunk.strip()
for token in self.special_tokens:
if len(curr_chunk) < len(token) and curr_chunk in token:
hold = True
elif len(curr_chunk) >= len(token):
if token in curr_chunk:
self.holding_chunk = curr_chunk.replace(token, "")
hold = True
else:
pass
if hold is False: # reset
self.holding_chunk = ""
return hold, curr_chunk
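# Illustrative walk-through (comments only): for a chunk like "<|" from an openai-compatible HF
# endpoint, the text is shorter than the special tokens it could be starting, so hold=True and the
# chunk is not emitted yet; once the accumulated text contains a complete token such as
# "<|assistant|>", the token is stripped and the remainder is staged in holding_chunk for release.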
def handle_anthropic_chunk(self, chunk):
str_line = chunk.decode("utf-8") # Convert bytes to string
text = ""
is_finished = False
finish_reason = None
if str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
text = data_json.get("completion", "")
if data_json.get("stop_reason", None):
is_finished = True
finish_reason = data_json["stop_reason"]
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_together_ai_chunk(self, chunk):
chunk = chunk.decode("utf-8")
text = ""
is_finished = False
finish_reason = None
if "text" in chunk:
text_index = chunk.find('"text":"') # this checks if text: exists
text_start = text_index + len('"text":"')
text_end = chunk.find('"}', text_start)
if text_index != -1 and text_end != -1:
extracted_text = chunk[text_start:text_end]
text = extracted_text
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "[DONE]" in chunk:
return {"text": text, "is_finished": True, "finish_reason": "stop"}
elif "error" in chunk:
raise ValueError(chunk)
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_huggingface_chunk(self, chunk):
try:
if type(chunk) != str:
chunk = chunk.decode("utf-8") # DO NOT REMOVE this: This is required for HF inference API + Streaming
text = ""
is_finished = False
finish_reason = ""
print_verbose(f"chunk: {chunk}")
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
print_verbose(f"data json: {data_json}")
if "token" in data_json and "text" in data_json["token"]:
text = data_json["token"]["text"]
if data_json.get("details", False) and data_json["details"].get("finish_reason", False):
is_finished = True
finish_reason = data_json["details"]["finish_reason"]
elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete
text = "" # don't return the final bos token
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in chunk:
raise ValueError(chunk)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
# raise(e)
def handle_ai21_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["data"]["text"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_maritalk_chunk(self, chunk): # fake streaming
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["answer"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_nlp_cloud_chunk(self, chunk):
text = ""
is_finished = False
finish_reason = ""
try:
if "dolphin" in self.model:
chunk = self.process_chunk(chunk=chunk)
else:
data_json = json.loads(chunk)
chunk = data_json["generated_text"]
text = chunk
if "[DONE]" in text:
text = text.replace("[DONE]", "")
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_aleph_alpha_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = data_json["completions"][0]["completion"]
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_cohere_chunk(self, chunk):
chunk = chunk.decode("utf-8")
data_json = json.loads(chunk)
try:
text = ""
is_finished = False
finish_reason = ""
if "text" in data_json:
text = data_json["text"]
elif "is_finished" in data_json:
is_finished = data_json["is_finished"]
finish_reason = data_json["finish_reason"]
else:
raise Exception(data_json)
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_azure_chunk(self, chunk):
is_finished = False
finish_reason = ""
text = ""
print_verbose(f"chunk: {chunk}")
if "data: [DONE]" in chunk:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif chunk.startswith("data:"):
data_json = json.loads(chunk[5:]) # chunk.startswith("data:"):
try:
if len(data_json["choices"]) > 0:
text = data_json["choices"][0]["delta"].get("content", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
elif "error" in chunk:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
def handle_replicate_chunk(self, chunk):
try:
text = ""
is_finished = False
finish_reason = ""
if "output" in chunk:
text = chunk['output']
if "status" in chunk:
if chunk["status"] == "succeeded":
is_finished = True
finish_reason = "stop"
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
def handle_openai_chat_completion_chunk(self, chunk):
try:
print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n")
str_line = chunk
text = ""
is_finished = False
finish_reason = None
original_chunk = None # this is used for function/tool calling
if len(str_line.choices) > 0:
if str_line.choices[0].delta.content is not None:
text = str_line.choices[0].delta.content
else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai
original_chunk = str_line
if str_line.choices[0].finish_reason:
is_finished = True
finish_reason = str_line.choices[0].finish_reason
return {
"text": text,
"is_finished": is_finished,
"finish_reason": finish_reason,
"original_chunk": str_line
}
except Exception as e:
traceback.print_exc()
raise e
def handle_openai_text_completion_chunk(self, chunk):
try:
str_line = chunk
text = ""
is_finished = False
finish_reason = None
print_verbose(f"str_line: {str_line}")
if "data: [DONE]" in str_line:
text = ""
is_finished = True
finish_reason = "stop"
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif str_line.startswith("data:"):
data_json = json.loads(str_line[5:])
print_verbose(f"delta content: {data_json}")
text = data_json["choices"][0].get("text", "")
if data_json["choices"][0].get("finish_reason", None):
is_finished = True
finish_reason = data_json["choices"][0]["finish_reason"]
print_verbose(f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}")
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
elif "error" in str_line:
raise ValueError(f"Unable to parse response. Original response: {str_line}")
else:
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
except Exception as e:
traceback.print_exc()
raise e
def handle_baseten_chunk(self, chunk):
try:
chunk = chunk.decode("utf-8")
if len(chunk) > 0:
if chunk.startswith("data:"):
data_json = json.loads(chunk[5:])
if "token" in data_json and "text" in data_json["token"]:
return data_json["token"]["text"]
else:
return ""
data_json = json.loads(chunk)
if "model_output" in data_json:
if isinstance(data_json["model_output"], dict) and "data" in data_json["model_output"] and isinstance(data_json["model_output"]["data"], list):
return data_json["model_output"]["data"][0]
elif isinstance(data_json["model_output"], str):
return data_json["model_output"]
elif "completion" in data_json and isinstance(data_json["completion"], str):
return data_json["completion"]
else:
raise ValueError(f"Unable to parse response. Original response: {chunk}")
else:
return ""
else:
return ""
except:
traceback.print_exc()
return ""
def handle_bedrock_stream(self, chunk):
if hasattr(chunk, "get"):
chunk = chunk.get('chunk')
chunk_data = json.loads(chunk.get('bytes').decode())
else:
chunk_data = json.loads(chunk.decode())
if chunk_data:
text = ""
is_finished = False
finish_reason = ""
if "outputText" in chunk_data:
text = chunk_data['outputText']
# ai21 mapping
if "ai21" in self.model: # fake ai21 streaming
text = chunk_data.get('completions')[0].get('data').get('text')
is_finished = True
finish_reason = "stop"
# anthropic mapping
elif "completion" in chunk_data:
text = chunk_data['completion'] # bedrock.anthropic
stop_reason = chunk_data.get("stop_reason", None)
if stop_reason != None:
is_finished = True
finish_reason = stop_reason
# meta mapping
elif "generation" in chunk_data:
text = chunk_data['generation'] # bedrock.meta
######## bedrock.cohere mappings ###############
# cohere mapping
elif "text" in chunk_data:
text = chunk_data["text"] # bedrock.cohere
# cohere mapping for finish reason
elif "finish_reason" in chunk_data:
finish_reason = chunk_data["finish_reason"]
is_finished = True
elif chunk_data.get("completionReason", None):
is_finished = True
finish_reason = chunk_data["completionReason"]
elif chunk.get("error", None):
raise Exception(chunk["error"])
return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
return ""
def chunk_creator(self, chunk):
model_response = ModelResponse(stream=True, model=self.model)
model_response.choices = [StreamingChoices()]
model_response.choices[0].finish_reason = None
response_obj = {}
try:
# return this for all models
completion_obj = {"content": ""}
if self.custom_llm_provider and self.custom_llm_provider == "anthropic":
response_obj = self.handle_anthropic_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.model == "replicate" or self.custom_llm_provider == "replicate":
response_obj = self.handle_replicate_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif (
self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
response_obj = self.handle_together_ai_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "huggingface":
response_obj = self.handle_huggingface_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "baseten": # baseten doesn't provide streaming
completion_obj["content"] = self.handle_baseten_chunk(chunk)
elif self.custom_llm_provider and self.custom_llm_provider == "ai21": #ai21 doesn't provide streaming
response_obj = self.handle_ai21_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "maritalk":
response_obj = self.handle_maritalk_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider and self.custom_llm_provider == "vllm":
completion_obj["content"] = chunk[0].outputs[0].text
elif self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha": #aleph alpha doesn't provide streaming
response_obj = self.handle_aleph_alpha_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "nlp_cloud":
try:
response_obj = self.handle_nlp_cloud_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
except Exception as e:
if self.sent_last_chunk:
raise e
else:
if self.sent_first_chunk is False:
raise Exception("An unknown error occurred with the stream")
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider and self.custom_llm_provider == "vertex_ai":
try:
# print(chunk)
if hasattr(chunk, 'text'):
# vertexAI streaming chunks are MultiCandidateTextGenerationResponse objects; the generated text is on `.text`
completion_obj["content"] = chunk.text
else:
completion_obj["content"] = str(chunk)
except StopIteration as e:
if self.sent_last_chunk:
raise e
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
elif self.custom_llm_provider == "cohere":
response_obj = self.handle_cohere_chunk(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
elif self.custom_llm_provider == "bedrock":
if self.sent_last_chunk:
raise StopIteration
response_obj = self.handle_bedrock_stream(chunk)
completion_obj["content"] = response_obj["text"]
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
self.sent_last_chunk = True
elif self.custom_llm_provider == "sagemaker":
print_verbose(f"ENTERS SAGEMAKER STREAMING")
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
new_chunk = self.completion_stream
print_verbose(f"sagemaker chunk: {new_chunk}")
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[len(self.completion_stream):]
elif self.custom_llm_provider == "petals":
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "palm":
# fake streaming
response_obj = {}
if len(self.completion_stream)==0:
if self.sent_last_chunk:
raise StopIteration
else:
model_response.choices[0].finish_reason = "stop"
self.sent_last_chunk = True
chunk_size = 30
new_chunk = self.completion_stream[:chunk_size]
completion_obj["content"] = new_chunk
self.completion_stream = self.completion_stream[chunk_size:]
time.sleep(0.05)
elif self.custom_llm_provider == "ollama":
if "error" in chunk:
exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"])
completion_obj = chunk
elif self.custom_llm_provider == "text-completion-openai":
response_obj = self.handle_openai_text_completion_chunk(chunk)
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
else: # openai chat model
response_obj = self.handle_openai_chat_completion_chunk(chunk)
if response_obj == None:
return
completion_obj["content"] = response_obj["text"]
print_verbose(f"completion obj content: {completion_obj['content']}")
if response_obj["is_finished"]:
model_response.choices[0].finish_reason = response_obj["finish_reason"]
model_response.model = self.model
print_verbose(f"model_response: {model_response}; completion_obj: {completion_obj}")
print_verbose(f"model_response finish reason 3: {model_response.choices[0].finish_reason}")
if len(completion_obj["content"]) > 0: # cannot set content of an OpenAI Object to be an empty string
hold, model_response_str = self.check_special_tokens(chunk=completion_obj["content"], finish_reason=model_response.choices[0].finish_reason) # filter out bos/eos tokens from openai-compatible hf endpoints
print_verbose(f"hold - {hold}, model_response_str - {model_response_str}")
if hold is False:
## check if openai/azure chunk
original_chunk = response_obj.get("original_chunk", None)
if original_chunk:
model_response.id = original_chunk.id
if len(original_chunk.choices) > 0:
try:
delta = dict(original_chunk.choices[0].delta)
model_response.choices[0].delta = Delta(**delta)
except Exception as e:
model_response.choices[0].delta = Delta()
else:
return
model_response.system_fingerprint = original_chunk.system_fingerprint
if self.sent_first_chunk == False:
model_response.choices[0].delta["role"] = "assistant"
self.sent_first_chunk = True
else:
## else
completion_obj["content"] = model_response_str
if self.sent_first_chunk == False:
completion_obj["role"] = "assistant"
self.sent_first_chunk = True
model_response.choices[0].delta = Delta(**completion_obj)
print_verbose(f"model_response: {model_response}")
return model_response
else:
return
elif model_response.choices[0].finish_reason:
# flush any remaining holding chunk
if len(self.holding_chunk) > 0:
if model_response.choices[0].delta.content is None:
model_response.choices[0].delta.content = self.holding_chunk
else:
model_response.choices[0].delta.content = self.holding_chunk + model_response.choices[0].delta.content
self.holding_chunk = ""
model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
return model_response
elif response_obj is not None and response_obj.get("original_chunk", None) is not None: # function / tool calling branch - only set for openai/azure compatible endpoints
# enter this branch when no content has been passed in response
original_chunk = response_obj.get("original_chunk", None)
model_response.id = original_chunk.id
if len(original_chunk.choices) > 0:
if original_chunk.choices[0].delta.function_call is not None or original_chunk.choices[0].delta.tool_calls is not None:
try:
delta = dict(original_chunk.choices[0].delta)
model_response.choices[0].delta = Delta(**delta)
except Exception as e:
model_response.choices[0].delta = Delta()
else:
return
else:
return
model_response.system_fingerprint = original_chunk.system_fingerprint
if self.sent_first_chunk == False:
model_response.choices[0].delta["role"] = "assistant"
self.sent_first_chunk = True
return model_response
else:
return
except StopIteration:
raise StopIteration
except Exception as e:
traceback_exception = traceback.format_exc()
e.message = str(e)
raise exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=e)
## needs to handle the empty string case (even starting chunk can be an empty string)
def __next__(self):
try:
while True:
if isinstance(self.completion_stream, str) or isinstance(self.completion_stream, bytes):
chunk = self.completion_stream
else:
chunk = next(self.completion_stream)
print_verbose(f"value of chunk: {chunk} ")
if chunk is not None and chunk != b'':
print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}")
response = self.chunk_creator(chunk=chunk)
print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}")
if response is None:
continue
## LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(response,)).start() # log response
return response
except StopIteration:
raise # Re-raise StopIteration
except Exception as e:
print_verbose(f"HITS AN ERROR: {str(e)}")
traceback_exception = traceback.format_exc()
# LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated
threading.Thread(target=self.logging_obj.failure_handler, args=(e, traceback_exception)).start()
raise e
async def __anext__(self):
try:
if (self.custom_llm_provider == "openai"
or self.custom_llm_provider == "azure"
or self.custom_llm_provider == "custom_openai"
or self.custom_llm_provider == "text-completion-openai"
or self.custom_llm_provider == "huggingface"):
async for chunk in self.completion_stream:
if chunk == "None" or chunk is None:
raise Exception
                    # chunk_creator() does the logging and stream-chunk building. We need to let it know it's being called from an async function, so we don't double-add chunks.
                    # __anext__ also calls async_success_handler, which does logging
processed_chunk = self.chunk_creator(chunk=chunk)
if processed_chunk is None:
continue
## LOGGING
threading.Thread(target=self.logging_obj.success_handler, args=(processed_chunk,)).start() # log response
asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,))
return processed_chunk
raise StopAsyncIteration
else: # temporary patch for non-aiohttp async calls
# example - boto3 bedrock llms
processed_chunk = next(self)
asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,))
return processed_chunk
except StopAsyncIteration:
raise
except StopIteration:
raise StopAsyncIteration # Re-raise StopIteration
except Exception as e:
traceback_exception = traceback.format_exc()
# Handle any exceptions that might occur during streaming
asyncio.create_task(self.logging_obj.async_failure_handler(e, traceback_exception))
raise StopAsyncIteration
class TextCompletionStreamWrapper:
def __init__(self, completion_stream, model):
self.completion_stream = completion_stream
self.model = model
def __iter__(self):
return self
def __aiter__(self):
return self
def __next__(self):
# model_response = ModelResponse(stream=True, model=self.model)
response = TextCompletionResponse()
try:
while True: # loop until a non-empty string is found
# return this for all models
chunk = next(self.completion_stream)
response["id"] = chunk.get("id", None)
response["object"] = "text_completion"
response["created"] = response.get("created", None)
response["model"] = response.get("model", None)
text_choices = TextChoices()
text_choices["text"] = chunk["choices"][0]["delta"]["content"]
text_choices["index"] = response["choices"][0]["index"]
text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
response["choices"] = [text_choices]
return response
except StopIteration:
raise StopIteration
except Exception as e:
print(f"got exception {e}") # noqa
async def __anext__(self):
try:
return next(self)
except StopIteration:
raise StopAsyncIteration
def mock_completion_streaming_obj(model_response, mock_response, model):
for i in range(0, len(mock_response), 3):
completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
model_response.choices[0].delta = completion_obj
yield model_response
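# Illustrative sketch of how the mock streamer above is consumed: it yields the mock response
# in 3-character deltas, mimicking a streamed completion. The text is a placeholder.
#   mock_stream = mock_completion_streaming_obj(ModelResponse(stream=True), "Hello World", model="gpt-3.5-turbo")
#   for part in mock_stream:
#       print(part.choices[0].delta)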
########## Reading Config File ############################
def read_config_args(config_path) -> dict:
try:
import os
current_path = os.getcwd()
with open(config_path, "r") as config_file:
config = json.load(config_file)
# read keys/ values from config file and return them
return config
except Exception as e:
raise e
########## experimental completion variants ############################
def completion_with_config(config: Union[dict, str], **kwargs):
"""
Generate a litellm.completion() using a config dict and all supported completion args
Example config;
config = {
"default_fallback_models": # [Optional] List of model names to try if a call fails
"available_models": # [Optional] List of all possible models you could call
"adapt_to_prompt_size": # [Optional] True/False - if you want to select model based on prompt size (will pick from available_models)
"model": {
"model-name": {
"needs_moderation": # [Optional] True/False - if you want to call openai moderations endpoint before making completion call. Will raise exception, if flagged.
"error_handling": {
"error-type": { # One of the errors listed here - https://docs.litellm.ai/docs/exception_mapping#custom-mapping-list
"fallback_model": "" # str, name of the model it should try instead, when that error occurs
}
}
}
}
}
Parameters:
config (Union[dict, str]): A configuration for litellm
**kwargs: Additional keyword arguments for litellm.completion
Returns:
litellm.ModelResponse: A ModelResponse with the generated completion
"""
if config is not None:
if isinstance(config, str):
config = read_config_args(config)
elif isinstance(config, dict):
config = config
else:
raise Exception("Config path must be a string or a dictionary.")
else:
raise Exception("Config path not passed in.")
if config is None:
raise Exception("No completion config in the config file")
models_with_config = config["model"].keys()
model = kwargs["model"]
messages = kwargs["messages"]
## completion config
fallback_models = config.get("default_fallback_models", None)
available_models = config.get("available_models", None)
adapt_to_prompt_size = config.get("adapt_to_prompt_size", False)
trim_messages_flag = config.get("trim_messages", False)
prompt_larger_than_model = False
max_model = model
try:
max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
max_tokens = 2048 # assume curr model's max window is 2048 tokens
if adapt_to_prompt_size:
## Pick model based on token window
prompt_tokens = litellm.token_counter(model="gpt-3.5-turbo", text="".join(message["content"] for message in messages))
try:
curr_max_tokens = litellm.get_max_tokens(model)["max_tokens"]
except:
curr_max_tokens = 2048
if curr_max_tokens < prompt_tokens:
prompt_larger_than_model = True
for available_model in available_models:
try:
curr_max_tokens = litellm.get_max_tokens(available_model)["max_tokens"]
if curr_max_tokens > max_tokens:
max_tokens = curr_max_tokens
max_model = available_model
if curr_max_tokens > prompt_tokens:
model = available_model
prompt_larger_than_model = False
except:
continue
if prompt_larger_than_model:
messages = trim_messages(messages=messages, model=max_model)
kwargs["messages"] = messages
kwargs["model"] = model
try:
if model in models_with_config:
## Moderation check
if config["model"][model].get("needs_moderation"):
input = " ".join(message["content"] for message in messages)
response = litellm.moderation(input=input)
flagged = response["results"][0]["flagged"]
if flagged:
raise Exception("This response was flagged as inappropriate")
## Model-specific Error Handling
error_handling = None
if config["model"][model].get("error_handling"):
error_handling = config["model"][model]["error_handling"]
try:
response = litellm.completion(**kwargs)
return response
except Exception as e:
exception_name = type(e).__name__
fallback_model = None
if error_handling and exception_name in error_handling:
error_handler = error_handling[exception_name]
# either switch model or api key
fallback_model = error_handler.get("fallback_model", None)
if fallback_model:
kwargs["model"] = fallback_model
return litellm.completion(**kwargs)
raise e
else:
return litellm.completion(**kwargs)
except Exception as e:
if fallback_models:
model = fallback_models.pop(0)
return completion_with_fallbacks(model=model, messages=messages, fallbacks=fallback_models)
raise e
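# A minimal, illustrative sketch of the config format documented in completion_with_config() above.
# The model names, the exception name, and the moderation flag are placeholders chosen for
# illustration only.
def _example_completion_with_config_usage():
    example_config = {
        "default_fallback_models": ["gpt-3.5-turbo"],
        "model": {
            "gpt-4": {
                "needs_moderation": True,
                "error_handling": {
                    "ContextWindowExceededError": {"fallback_model": "gpt-3.5-turbo-16k"}
                },
            }
        },
    }
    return completion_with_config(
        config=example_config,
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}],
    )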
def completion_with_fallbacks(**kwargs):
nested_kwargs = kwargs.pop("kwargs", {})
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
original_model = kwargs["model"]
fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
if "fallbacks" in nested_kwargs:
del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
litellm_call_id = str(uuid.uuid4())
# max time to process a request with fallbacks: default 45s
while response == None and time.time() - start_time < 45:
for model in fallbacks:
# loop thru all models
try:
# check if it's dict or new model string
if isinstance(model, dict): # completion(model="gpt-4", fallbacks=[{"api_key": "", "api_base": ""}, {"api_key": "", "api_base": ""}])
kwargs["api_key"] = model.get("api_key", None)
kwargs["api_base"] = model.get("api_base", None)
model = model.get("model", original_model)
elif (
model in rate_limited_models
): # check if model is currently cooling down
if (
model_expiration_times.get(model)
and time.time() >= model_expiration_times[model]
):
rate_limited_models.remove(
model
) # check if it's been 60s of cool down and remove model
else:
continue # skip model
# delete model from kwargs if it exists
if kwargs.get("model"):
del kwargs["model"]
print_verbose(f"trying to make completion call with model: {model}")
kwargs["litellm_call_id"] = litellm_call_id
kwargs = {**kwargs, **nested_kwargs} # combine the openai + litellm params at the same level
response = litellm.completion(**kwargs, model=model)
print_verbose(f"response: {response}")
if response != None:
return response
except Exception as e:
print_verbose(e)
rate_limited_models.add(model)
model_expiration_times[model] = (
time.time() + 60
) # cool down this selected model
pass
return response
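# Illustrative sketch of the fallback flow implemented above. The model names are placeholders;
# in practice this path is usually reached by passing `fallbacks=[...]` to `litellm.completion()`.
def _example_completion_with_fallbacks_usage():
    return completion_with_fallbacks(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
        kwargs={"fallbacks": ["gpt-3.5-turbo-16k", "claude-instant-1"]},
    )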
def process_system_message(system_message, max_tokens, model):
system_message_event = {"role": "system", "content": system_message}
system_message_tokens = get_token_count([system_message_event], model)
if system_message_tokens > max_tokens:
print_verbose("`tokentrimmer`: Warning, system message exceeds token limit. Trimming...")
# shorten system message to fit within max_tokens
new_system_message = shorten_message_to_fit_limit(system_message_event, max_tokens, model)
system_message_tokens = get_token_count([new_system_message], model)
return system_message_event, max_tokens - system_message_tokens
def process_messages(messages, max_tokens, model):
    # Walk messages from the most recent to the oldest, keeping as many recent messages as fit
messages = messages[::-1]
final_messages = []
for message in messages:
used_tokens = get_token_count(final_messages, model)
available_tokens = max_tokens - used_tokens
if available_tokens <= 3:
break
final_messages = attempt_message_addition(final_messages=final_messages, message=message, available_tokens=available_tokens, max_tokens=max_tokens, model=model)
return final_messages
def attempt_message_addition(final_messages, message, available_tokens, max_tokens, model):
temp_messages = [message] + final_messages
temp_message_tokens = get_token_count(messages=temp_messages, model=model)
if temp_message_tokens <= max_tokens:
return temp_messages
# if temp_message_tokens > max_tokens, try shortening temp_messages
elif "function_call" not in message:
        # shorten the message so that, together with final_messages, it fits within the remaining available_tokens
updated_message = shorten_message_to_fit_limit(message, available_tokens, model)
if can_add_message(updated_message, final_messages, max_tokens, model):
return [updated_message] + final_messages
return final_messages
def can_add_message(message, messages, max_tokens, model):
if get_token_count(messages + [message], model) <= max_tokens:
return True
return False
def get_token_count(messages, model):
return token_counter(model=model, messages=messages)
def shorten_message_to_fit_limit(
message,
tokens_needed,
model):
"""
Shorten a message to fit within a token limit by removing characters from the middle.
"""
    # For OpenAI models, even blank messages cost 7 tokens,
# and if the buffer is less than 3, the while loop will never end,
# hence the value 10.
if 'gpt' in model and tokens_needed <= 10:
return message
content = message["content"]
while True:
total_tokens = get_token_count([message], model)
if total_tokens <= tokens_needed:
break
ratio = (tokens_needed) / total_tokens
new_length = int(len(content) * ratio) -1
new_length = max(0, new_length)
half_length = new_length // 2
left_half = content[:half_length]
right_half = content[-half_length:]
trimmed_content = left_half + '..' + right_half
message["content"] = trimmed_content
content = trimmed_content
return message
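# Illustrative sketch of the middle-removal behaviour above; the message text and token budget are
# placeholders, and the exact result depends on the model's tokenizer.
def _example_shorten_message_usage():
    msg = {"role": "user", "content": "a very long user message " * 50}
    # keeps the head and tail of the original content, joined by ".."
    return shorten_message_to_fit_limit(msg, tokens_needed=50, model="gpt-3.5-turbo")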
# LiteLLM token trimmer
# this code is borrowed from https://github.com/KillianLucas/tokentrim/blob/main/tokentrim/tokentrim.py
# Credits for this code go to Killian Lucas
def trim_messages(
messages,
model: Optional[str] = None,
trim_ratio: float = 0.75,
return_response_tokens: bool = False,
max_tokens = None
):
"""
Trim a list of messages to fit within a model's token limit.
Args:
messages: Input messages to be trimmed. Each message is a dictionary with 'role' and 'content'.
model: The LiteLLM model being used (determines the token limit).
trim_ratio: Target ratio of tokens to use after trimming. Default is 0.75, meaning it will trim messages so they use about 75% of the model's token limit.
return_response_tokens: If True, also return the number of tokens left available for the response after trimming.
max_tokens: Instead of specifying a model or trim_ratio, you can specify this directly.
Returns:
Trimmed messages and optionally the number of tokens available for response.
"""
# Initialize max_tokens
# if users pass in max tokens, trim to this amount
messages = copy.deepcopy(messages)
try:
print_verbose(f"trimming messages")
if max_tokens == None:
# Check if model is valid
if model in litellm.model_cost:
max_tokens_for_model = litellm.model_cost[model]['max_tokens']
max_tokens = int(max_tokens_for_model * trim_ratio)
else:
# if user did not specify max tokens
# or passed an llm litellm does not know
# do nothing, just return messages
                return messages
system_message = ""
for message in messages:
if message["role"] == "system":
system_message += '\n' if system_message else ''
system_message += message["content"]
current_tokens = token_counter(model=model, messages=messages)
print_verbose(f"Current tokens: {current_tokens}, max tokens: {max_tokens}")
        # Do nothing if the current token count is already under max_tokens
if current_tokens < max_tokens:
return messages
#### Trimming messages if current_tokens > max_tokens
print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
if system_message:
system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
if max_tokens == 0: # the system messages are too long
return [system_message_event]
# Since all system messages are combined and trimmed to fit the max_tokens,
# we remove all system messages from the messages list
messages = [message for message in messages if message["role"] != "system"]
final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)
# Add system message to the beginning of the final messages
if system_message:
final_messages = [system_message_event] + final_messages
if return_response_tokens: # if user wants token count with new trimmed messages
response_tokens = max_tokens - get_token_count(final_messages, model)
return final_messages, response_tokens
return final_messages
    except Exception as e: # [NON-Blocking] if an error occurs, just return the original messages
        print_verbose(f"Got exception while token trimming: {e}")
return messages
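# Illustrative sketch of trimming a conversation to roughly half of gpt-3.5-turbo's context window;
# the messages are placeholders. With return_response_tokens=True a (messages, remaining_tokens)
# tuple is returned instead of just the trimmed list.
def _example_trim_messages_usage():
    example_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize this very long document ... " * 200},
    ]
    return trim_messages(example_messages, model="gpt-3.5-turbo", trim_ratio=0.5)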
def get_valid_models():
"""
Returns a list of valid LLMs based on the set environment variables
Args:
None
Returns:
A list of valid LLMs
"""
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case litellm has together_ai as a provider, it should be togetherai
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
except:
return [] # NON-Blocking
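# Illustrative sketch: with only OPENAI_API_KEY exported, get_valid_models() returns the OpenAI
# model names litellm knows about. The key value below is a placeholder.
def _example_get_valid_models_usage():
    os.environ["OPENAI_API_KEY"] = "sk-placeholder"
    return get_valid_models()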
# used for litellm.text_completion() to transform HF logprobs to OpenAI.Completion() format
def transform_logprobs(hf_response):
# Initialize an empty list for the transformed logprobs
transformed_logprobs = []
# For each Hugging Face response, transform the logprobs
for response in hf_response:
# Extract the relevant information from the response
response_details = response['details']
top_tokens = response_details.get("top_tokens", {})
# Initialize an empty list for the token information
token_info = {
'tokens': [],
'token_logprobs': [],
'text_offset': [],
'top_logprobs': [],
}
for i, token in enumerate(response_details['prefill']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
# stub this to work with llm eval harness
top_alt_tokens = { "": -1, "": -2, "": -3 }
token_info['top_logprobs'].append(top_alt_tokens)
# For each element in the 'tokens' list, extract the relevant information
for i, token in enumerate(response_details['tokens']):
# Extract the text of the token
token_text = token['text']
# Extract the logprob of the token
token_logprob = token['logprob']
top_alt_tokens = {}
temp_top_logprobs = []
if top_tokens != {}:
temp_top_logprobs = top_tokens[i]
# top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 }
for elem in temp_top_logprobs:
text = elem["text"]
logprob = elem["logprob"]
top_alt_tokens[text] = logprob
# Add the token information to the 'token_info' list
token_info['tokens'].append(token_text)
token_info['token_logprobs'].append(token_logprob)
token_info['top_logprobs'].append(top_alt_tokens)
# Add the text offset of the token
# This is computed as the sum of the lengths of all previous tokens
token_info['text_offset'].append(sum(len(t['text']) for t in response_details['tokens'][:i]))
# Add the 'token_info' list to the 'transformed_logprobs' list
transformed_logprobs = token_info
return transformed_logprobs
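# Illustrative sketch of the Hugging Face TGI response shape the transform above expects; the token
# texts and logprob values are placeholders (real responses come from the text-generation-inference API).
def _example_transform_logprobs_usage():
    fake_hf_response = [{
        "details": {
            "prefill": [{"text": "Hello", "logprob": -0.1}],
            "tokens": [{"text": " world", "logprob": -0.2}],
        }
    }]
    # returns a dict with 'tokens', 'token_logprobs', 'text_offset' and 'top_logprobs'
    return transform_logprobs(fake_hf_response)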
# used in LiteLLM Router
def remove_model_id(original_model_string):
# Find the index of "ModelID" in the string
index_of_model_id = original_model_string.find("-ModelID")
# Remove everything after "-ModelID" if it exists
if index_of_model_id != -1:
return original_model_string[:index_of_model_id]
return original_model_string | [
"Hey, how's it going?",
"0",
"True",
"gpt-3.5-turbo",
"Hey, how's it going",
"input_cost_per_token",
"Hello World",
"None",
"adapt_to_prompt_size",
"False",
"application/json",
"content",
"ft:gpt-3.5-turbo",
"prompt_tokens"
] |
2024-01-10 | Boostchat-ai/litellm | litellm~proxy~tests~bursty_load_test_completion.py | import time, asyncio
from openai import AsyncOpenAI
import uuid
import traceback
litellm_client = AsyncOpenAI(
api_key="test",
base_url="http://0.0.0.0:8000"
)
async def litellm_completion():
# Your existing code for litellm_completion goes here
try:
response = await litellm_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"*180}], # this is about 4k tokens per request
)
print(response)
return response
except Exception as e:
# If there's an exception, log the error message
with open("error_log.txt", "a") as error_log:
error_log.write(f"Error during completion: {str(e)}\n")
pass
async def main():
start = time.time()
n = 60 # Send 60 concurrent requests, each with 4k tokens = 240k Tokens
tasks = [litellm_completion() for _ in range(n)]
chat_completions = await asyncio.gather(*tasks)
successful_completions = [c for c in chat_completions if c is not None]
# Write errors to error_log.txt
with open("error_log.txt", "a") as error_log:
for completion in chat_completions:
if isinstance(completion, str):
error_log.write(completion + "\n")
print(n, time.time() - start, len(successful_completions))
if __name__ == "__main__":
# Blank out contents of error_log.txt
open("error_log.txt", "w").close()
asyncio.run(main())
| [] |
2024-01-10 | kazuki765/learn-lang-chain | src~summarizer.py | import streamlit as st
# from streamlit_chat import message
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from dotenv import load_dotenv
load_dotenv(verbose=True)
def init_page():
st.set_page_config(
page_title="Website Summarizer",
page_icon="🤗"
)
st.header("Website Summarizer 🤗")
st.sidebar.title("Options")
def init_messages():
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "messages" not in st.session_state:
st.session_state.messages = [
SystemMessage(content="You are a helpful assistant.")
]
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
return ChatOpenAI(temperature=0, model_name=model_name)
def get_url_input():
url = st.text_input("URL: ", key="input")
return url
def validate_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def get_content(url):
try:
with st.spinner("Fetching Content ..."):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
# fetch text from main (change the below code to filter page)
if soup.main:
return soup.main.get_text()
elif soup.article:
return soup.article.get_text()
else:
return soup.body.get_text()
except:
st.write('something wrong')
return None
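# Illustrative sketch of how get_content() could be narrowed to a specific element instead of
# <main>/<article>/<body>; the CSS selector below is a placeholder for whatever page is scraped:
#   node = soup.select_one("div#post-body")
#   return node.get_text() if node else soup.body.get_text()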
def build_prompt(content, n_chars=300):
return f"""以下はとある。Webページのコンテンツである。内容を{n_chars}程度でわかりやすく要約してください。
========
{content[:1000]}
========
日本語で書いてね!
"""
def get_answer(llm, messages):
with get_openai_callback() as cb:
answer = llm(messages)
return answer.content, cb.total_cost
def main():
init_page()
llm = select_model()
init_messages()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
is_valid_url = validate_url(url)
if not is_valid_url:
st.write('Please input valid url')
answer = None
else:
content = get_content(url)
if content:
prompt = build_prompt(content)
st.session_state.messages.append(HumanMessage(content=prompt))
with st.spinner("ChatGPT is typing ..."):
answer, cost = get_answer(llm, st.session_state.messages)
st.session_state.costs.append(cost)
else:
answer = None
if answer:
with response_container:
st.markdown("## Summary")
st.write(answer)
st.markdown("---")
st.markdown("## Original Text")
st.write(content)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"You are a helpful assistant."
] |
2024-01-10 | kazuki765/learn-lang-chain | src~youtube_summarizer.py | # https://www.youtube.com/watch?v=PMerSm2ToFY
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
from dotenv import load_dotenv
load_dotenv(verbose=True)
def init_page():
st.set_page_config(
page_title="Youtube Summarizer",
page_icon="🤗"
)
st.header("Youtube Summarizer 🤗")
st.sidebar.title("Options")
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
if model == "GPT-3.5":
model_name = "gpt-3.5-turbo"
else:
model_name = "gpt-4"
return ChatOpenAI(temperature=0, model_name=model_name)
def get_url_input():
url = st.text_input("Youtube URL: ", key="input")
return url
def get_document(url):
with st.spinner("Fetching Content ..."):
loader = YoutubeLoader.from_youtube_url(
url,
            add_video_info=True,  # also fetches video metadata such as the title and view count
            language=['en', 'ja']  # fetch subtitles, preferring English, then Japanese
)
return loader.load()
def summarize(llm, docs):
prompt_template = """Write a concise Japanese summary of the following transcript of Youtube Video.
============
{text}
============
ここから日本語で書いてね
必ず4段落以内の400文字以内で簡潔にまとめること:
"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
with get_openai_callback() as cb:
chain = load_summarize_chain(
llm,
chain_type="stuff",
verbose=True,
prompt=PROMPT
)
response = chain({"input_documents": docs}, return_only_outputs=True)
return response['output_text'], cb.total_cost
def main():
init_page()
llm = select_model()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
if url:
document = get_document(url)
with st.spinner("ChatGPT is typing ..."):
output_text, cost = summarize(llm, document)
st.session_state.costs.append(cost)
else:
output_text = None
if output_text:
with response_container:
st.markdown("## Summary")
st.write(output_text)
st.markdown("---")
st.markdown("## Original Text")
st.write(document)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"Write a concise Japanese summary of the following transcript of Youtube Video.\n\n============\n \n{text}\n\n============\n\nここから日本語で書いてね\n必ず4段落以内の400文字以内で簡潔にまとめること:\n"
] |
2024-01-10 | kazuki765/learn-lang-chain | src~youtube_summarizer_long.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from dotenv import load_dotenv
load_dotenv(verbose=True)
def init_page():
st.set_page_config(
page_title="Youtube Summarizer",
page_icon="🤗"
)
st.header("Youtube Summarizer 🤗")
st.sidebar.title("Options")
st.session_state.costs = []
def select_model():
model = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-3.5-16k", "GPT-4"))
if model == "GPT-3.5":
st.session_state.model_name = "gpt-3.5-turbo-0613"
elif model == "GPT-3.5-16k":
st.session_state.model_name = "gpt-3.5-turbo-16k-0613"
else:
st.session_state.model_name = "gpt-4"
    # 300: number of tokens reserved for the instructions other than the main text (same below)
st.session_state.max_token = OpenAI.modelname_to_contextsize(st.session_state.model_name) - 300
return ChatOpenAI(temperature=0, model_name=st.session_state.model_name)
def get_url_input():
url = st.text_input("Youtube URL: ", key="input")
return url
def get_document(url):
with st.spinner("Fetching Content ..."):
loader = YoutubeLoader.from_youtube_url(
url,
            add_video_info=True,  # also fetches video metadata such as the title and view count
            language=['en', 'ja']  # fetch subtitles, preferring English, then Japanese
)
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
model_name=st.session_state.model_name,
chunk_size=st.session_state.max_token,
chunk_overlap=0,
)
return loader.load_and_split(text_splitter=text_splitter)
def summarize(llm, docs):
prompt_template = """Write a concise Japanese summary of the following transcript of Youtube Video.
{text}
ここから日本語で書いてね:
"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
with get_openai_callback() as cb:
chain = load_summarize_chain(
llm,
chain_type="map_reduce",
verbose=True,
map_prompt=PROMPT,
combine_prompt=PROMPT
)
response = chain(
{
"input_documents": docs,
                # Note: if token_max is not specified, the chain's internal processing
                # falls back to the default context size of regular models such as GPT-3.5
"token_max": st.session_state.max_token
},
return_only_outputs=True
)
return response['output_text'], cb.total_cost
def main():
init_page()
llm = select_model()
container = st.container()
response_container = st.container()
with container:
url = get_url_input()
        document = get_document(url) if url else None
if document:
with st.spinner("ChatGPT is typing ..."):
output_text, cost = summarize(llm, document)
st.session_state.costs.append(cost)
else:
output_text = None
if output_text:
with response_container:
st.markdown("## Summary")
st.write(output_text)
st.markdown("---")
st.markdown("## Original Text")
st.write(document)
costs = st.session_state.get('costs', [])
st.sidebar.markdown("## Costs")
st.sidebar.markdown(f"**Total cost: ${sum(costs):.5f}**")
for cost in costs:
st.sidebar.markdown(f"- ${cost:.5f}")
if __name__ == '__main__':
main() | [
"Write a concise Japanese summary of the following transcript of Youtube Video.\n\n{text}\n\nここから日本語で書いてね:\n"
] |
2024-01-10 | UNN-SPKI/Nor-DeID-Evaluation | models~davinci_edit.py | """
davinci_edit is a wrapper around OpenAI's text-davinci-edit-001 model
and the redaction and pseudonymization task phrased for the model.
"""
import logging
import re
import time
from typing import List, Tuple, Union
import requests
import spacy
from joblib import Memory
from models.utilities.alignment import fix_orthography
from models.utilities.tags import list_annotations, remove_tags
CACHE_DIRECTORY = '.cache'
EXPECTED_TAGS = ['First_Name', 'Last_Name', 'Location', 'Health_Care_Unit', 'Age', 'Phone_Number', 'Social_Security_Number', 'Date']
def get_completion(source, instruction, openAIAPIKey, temperature, rate_limit = None):
if rate_limit:
time.sleep(rate_limit)
r = requests.post('https://api.openai.com/v1/edits',
json={
'model': 'text-davinci-edit-001',
'input': source,
'instruction': instruction,
'temperature': temperature
},
headers={
'Authorization': f'Bearer {openAIAPIKey}',
'Content-Type': 'application/json'
})
if r.status_code != requests.codes.ok:
logging.error(f"Got status code {r.status_code} from OpenAI.")
response = r.json()
return response
class DavinciEditModel:
def __init__(self, prompt, openAIAPIKey, rate_limit = 2, retries = 5):
self._prompt = prompt
self._openAIAPIKey = openAIAPIKey
self._rate_limit = rate_limit
self._retries = retries
self._memory = Memory(CACHE_DIRECTORY)
def predict(self, doc_bin: spacy.tokens.DocBin, language: spacy.Language, mode: str) -> Union[List[spacy.training.Example], List[str]]:
examples = []
for doc in doc_bin.get_docs(language.vocab):
logging.debug(f"Task: {doc.text}")
prediction = self.predict_task(doc.text)
logging.debug(f"Predicted: {prediction}")
if mode == 'replace':
examples.append(prediction)
continue
if remove_tags(prediction) != doc.text.rstrip():
logging.warning("Misaligned text!")
logging.warning(f"ORIGINAL: {doc.text}")
logging.warning(f"RETURNED: {remove_tags(prediction)}")
annotations = {'entities': list_annotations(prediction, EXPECTED_TAGS)}
logging.debug(f"Annotations: {annotations}")
example = spacy.training.Example.from_dict(doc, annotations)
examples.append(example)
return examples
def predict_task(self, source: str) -> str:
tries = 0
temperature = 0.0
instruction = self._prompt
while tries < self._retries:
get_cached_completion = self._memory.cache(get_completion)
response = get_cached_completion(source, instruction, self._openAIAPIKey, temperature, self._rate_limit)
if 'choices' not in response:
logging.error("Unexpected answer from OpenAI - could not find \'choices\'")
temperature += 0.01
tries += 1
continue
answer = response['choices'][0]['text']
return fix_orthography(answer)
logging.error(f'Could not get an edit after {self._retries} tries.')
return '' | [] |
2024-01-10 | UNN-SPKI/Nor-DeID-Evaluation | models~gpt_chat.py | """
gpt_chat is a wrapper around OpenAI's gpt-3.5-turbo model
and the redaction and pseudonymization task phrased for the model.
"""
import logging
import re
import time
from typing import List, Tuple, Union
import requests
import spacy
from joblib import Memory
from models.utilities.tags import list_annotations, remove_tags
CACHE_DIRECTORY = '.cache'
IGNORE_STARTS = ['Input:', 'Output:']
EXPECTED_TAGS = ['First_Name', 'Last_Name', 'Location', 'Health_Care_Unit', 'Age', 'Phone_Number', 'Social_Security_Number', 'Date', 'PHI']
def get_chat_completion(prompt, source, model, openAIAPIKey, temperature, rate_limit = None):
if rate_limit:
time.sleep(rate_limit)
prologue = [
{'role': 'system', 'content': prompt}
# {'role': 'user', 'content': 'Input: Georg Nordmann er 47 år gammel og innlagt på Haukeland siden 3. april . Georgs kone Åshild ønsker at vi ringer henne på telefon 770 12345 når vi vet mer .: '},
# {'role': 'assistant', 'content': '<First_Name>Georg</First_Name> <Last_Name>Nordmann</Last_Name> er <Age>47 år gammel</Age> og innlagt på <Location>Haukeland</Location> siden <Date>3. april</Date> . <First_Name>Georgs</First_Name> kone <First_Name>Åshild</First_Name> ønsker at vi ringer henne på telefon <Phone_Number>770 12345</Phone_Number> når vi vet mer .'},
]
messages = prologue + [{
'role': 'user', 'content': 'Input: ' + source
}]
r = requests.post('https://api.openai.com/v1/chat/completions',
json={
'model': model,
'messages': messages,
'temperature': temperature
},
headers={
'Authorization': f'Bearer {openAIAPIKey}',
'Content-Type': 'application/json'
})
if r.status_code != requests.codes.ok:
logging.error(f"Got status code {r.status_code} from OpenAI.")
response = r.json()
return response
def fix_orthography(answer: str) -> str:
space_punctuation = re.sub('\s*([,.])\s+', r' \1 ', answer).rstrip()
single_spaces = re.sub('\s+', ' ', space_punctuation)
return single_spaces
class GptChatModel:
def __init__(self, prompt, model, openAIAPIKey, rate_limit=2, retries=5):
self._model = model
self._prompt = prompt
self._openAIAPIKey = openAIAPIKey
self._rate_limit = rate_limit
self._retries = retries
self._memory = Memory(CACHE_DIRECTORY)
def predict(self, doc_bin: spacy.tokens.DocBin, language: spacy.Language, mode: str) -> Union[List[spacy.training.Example], List[str]]:
examples = []
for doc in doc_bin.get_docs(language.vocab):
logging.debug(f"Task: {doc.text}")
prediction = self.predict_task(doc.text)
if prediction.split()[0] in IGNORE_STARTS:
prediction = ' '.join(prediction.split()[1:])
logging.debug(f"Predicted: {prediction}")
if mode == 'replace':
examples.append(prediction)
continue
if remove_tags(prediction) != doc.text.rstrip():
logging.warning("Misaligned text!")
logging.warning(f"ORIGINAL: {doc.text}")
logging.warning(f"RETURNED: {remove_tags(prediction)}")
annotations = {'entities': list_annotations(prediction, EXPECTED_TAGS)}
logging.debug(f"Annotations: {annotations}")
example = spacy.training.Example.from_dict(doc, annotations)
examples.append(example)
return examples
def predict_task(self, source: str) -> str:
tries = 0
temperature = 0.0
while tries < self._retries:
get_cached_completion = self._memory.cache(get_chat_completion)
response = get_cached_completion(self._prompt, source, self._model, self._openAIAPIKey, temperature, self._rate_limit)
if 'choices' not in response:
logging.error(
"Unexpected answer from OpenAI - could not find \'choices\'")
temperature += 0.01
tries += 1
continue
answer = response['choices'][0]['message']['content']
return fix_orthography(answer)
logging.error(f'Could not get an edit after {self._retries} tries.')
return ''
| [
"Input: PLACEHOLDER"
] |
2024-01-10 | UnderTheTableHTV7/simplai_HTV7 | ai-api~simplify_cohere.py | import dotenv, os
import cohere
dotenv.load_dotenv('.env') # Use this in most cases
api_key = os.environ.get('COHERE_API_KEY')
co = cohere.Client(api_key)
def simplify_text_cohere(input_text):
return co.generate(prompt=input_text, model="ed5b1a41-0e40-4071-979e-3f0204d119d4-ft", max_tokens=50, temperature=0.9).generations[0].text
| [] |
2024-01-10 | ShreyashSingh1/Abhi-theek-kar-k-deta-hu | Language%20detector.py | import speech_recognition as sr
from langdetect import detect
# Initialize recognizer class (for recognizing the speech)
# r = sr.Recognizer()
from os import path
AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), "Recording.wav")
# AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), "french.aiff")
# AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), "chinese.flac")
# use the audio file as the audio source
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
audio = r.record(source) # read the entire audio file
# Reading Microphone as source
# listening the speech and store in audio_text variable
speech_file = "Recording.wav"
with open(speech_file, "rb") as audio_file:
content = audio_file.read()
with sr.Microphone() as source:
print("Talk")
audio_text = r.listen(source, timeout=5, phrase_time_limit=10)
print(type(audio_text))
print("Time over, thanks")
# recoginize_() method will throw a request error if the API is unreachable, hence using exception handling
try:
# using google speech recognition
text = r.recognize_google(audio)
print("Text: "+ text)
print("Language: "+ detect(text))
except Exception as e:
print("Sorry, I did not get that")
print(e)
from googletrans import Translator, constants
from pprint import pprint
# translate the recognized text to English, for instance
# init the Google API translator
translator = Translator()
translation = translator.translate(text, dest="en")
print(f"{translation.origin} ({translation.src}) --> {translation.text} ({translation.dest})")
pprint(translation)
translation1 = translator.translate("are you ok are you not ok my name", dest="hi")
# print(f"{translation1.origin} ({translation1.src}) --> {translation1.text} ({translation1.dest})")
# pprint(translation1)
# print((f"{translation1.origin} ({translation1.src}) --> {translation1.text} ({translation1.dest})").encode("utf-8", "replace").decode("utf-8"))
pprint((f"{translation1.origin} ({translation1.src}) --> {translation1.text} ({translation1.dest})").encode("utf-8", "replace").decode("utf-8"))
pprint(translation1)
from gtts import gTTS
def text_to_speech(text, lang, filename):
tts = gTTS(text=text, lang=lang)
tts.save(filename)
text_to_speech("Hello i am shreyash", 'en', 'english.mp3')
text_to_speech('Hola, ¿cómo estás?', 'es', 'spanish.mp3')
text_to_speech('Bonjour, comment ça va?', 'fr', 'french.mp3')
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
# Create a new chatbot named Charlie
chatbot = ChatBot('Charlie')
# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot)
# Now train the chatbot with the English greetings corpus
trainer.train("chatterbot.corpus.english.greetings")
# Get a response to the input text 'Hello, how are you?'
response = chatbot.get_response('Hello, how are you?')
print(response)
import openai
import os
openai.api_key = "org-lSM2FGW3BNpSIMozUeR8R8IJ"
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0, # this is the degree of randomness of the model's output
)
return response.choices[0].message["content"]
text = f"""
You should express what you want a model to do by \
providing instructions that are as clear and \
specific as you can possibly make them. \
This will guide the model towards the desired output, \
and reduce the chances of receiving irrelevant \
or incorrect responses. Don't confuse writing a \
clear prompt with writing a short prompt. \
In many cases, longer prompts provide more clarity \
and context for the model, which can lead to \
more detailed and relevant outputs.
"""
prompt = f"""
Summarize the text delimited by triple backticks \
into a single sentence.
```{text}```
"""
response = get_completion(prompt)
print(response)
text = '''[Your Company Name] - Terms and Conditions
1. Acceptance of Terms:
By accessing or using [Your Company Name]'s products and services, you agree to be bound by these Terms and Conditions.
2. Product Information:
[Your Company Name] reserves the right to modify, update, or discontinue products without prior notice. Product descriptions and specifications are subject to change.
3. Ordering and Payment:
a. All orders are subject to acceptance by [Your Company Name].
b. Prices are in [currency] and are subject to change without notice.
c. Payment must be made in full before the product is shipped.
4. Shipping and Delivery:
a. [Your Company Name] will make reasonable efforts to ensure timely delivery but is not responsible for delays beyond its control.
b. Risk of loss or damage passes to the customer upon delivery.
5. Warranty:
a. [Your Company Name] provides a limited warranty on its products. Please refer to the warranty statement for details.
b. The warranty is void if the product has been tampered with, modified, or repaired by unauthorized personnel.
6. Returns and Refunds:
a. Customers may return products within a specified period for a refund, subject to [Your Company Name]'s return policy.
b. Refunds will be issued in the original form of payment.
7. Intellectual Property:
a. All intellectual property rights related to [Your Company Name]'s products are owned by [Your Company Name].
b. Users are prohibited from reproducing, distributing, or using any content without explicit permission.
8. Limitation of Liability:
[Your Company Name] is not liable for any indirect, incidental, special, or consequential damages arising out of or in connection with the use of its products.
9. Governing Law:
These Terms and Conditions are governed by the laws of [Your Jurisdiction].
10. Modification of Terms:
[Your Company Name] reserves the right to update or modify these Terms and Conditions at any time without prior notice.
Contact Information:
[Your Company Name]
[Address]
[Email]
[Phone]'''
import os
# Set environment variable
os.environ['REPLICATE_API_TOKEN'] = 'r8_534R8BS65dTbyBgPvaBMU2kwAqOr6VY2cFEx3'
# Access the environment variable
value = os.environ.get('REPLICATE_API_TOKEN')
print(value)
import os
REPLICATE_API_TOKEN = 'r8_534R8BS65dTbyBgPvaBMU2kwAqOr6VY2cFEx3'
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
import replicate
output = replicate.run(
"replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b",
input={"prompt": "Summarize the terms and condition which is given below" + text, "max_length": 10000}
)
# The replicate/vicuna-13b model can stream output as it's running.
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
# https://replicate.com/replicate/vicuna-13b/api#output-schema
print(item, end="") | [
"\nSummarize the text delimited by triple backticks \\ \ninto a single sentence.\n```PLACEHOLDER```\n"
] |
2024-01-10 | HalleluyahBoy/chat | modified.py | import openai
import gradio
openai.api_key = "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"
messages = [{"role": "system", "content": "You are a financial experts that specializes in real estate investment and negotiation"}]
def CustomChatGPT(user_input):
messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages
)
ChatGPT_reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": ChatGPT_reply})
return ChatGPT_reply
demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Real Estate Pro")
demo.launch(share=True) | [
"You are a financial experts that specializes in real estate investment and negotiation"
] |
2024-01-10 | HalleluyahBoy/chat | chat_assistant.py | import openai
openai.api_key = "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"
messages = []
system_msg = input("What type of chatbot would you like to create?\n")
messages.append({"role": "system", "content": system_msg})
print("Your new assistant is ready!")
while True:
    message = input()
    if message == "quit()":
        break
messages.append({"role": "user", "content": message})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages)
reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": reply})
print("\n" + reply + "\n") | [] |
2024-01-10 | HalleluyahBoy/chat | simple.py | import openai
openai.api_key = "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"
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "who is the current president of nigeria "}])
print(completion.choices[0].message.content)
| [
"who is the current president of nigeria "
] |
2024-01-10 | Salah-boutayeb/genAI | src~services~service.py | from sklearn.metrics.pairwise import cosine_similarity
import cohere
import os
import numpy as np
import pandas as pd
from dotenv import load_dotenv
from sklearn.metrics.pairwise import cosine_similarity
from annoy import AnnoyIndex
load_dotenv()
key = os.getenv('api_key')
co = cohere.Client(key)
result = os.path.dirname(os.path.realpath('__file__'))
relative_path = ".\\data\\embeddings.npy"
full_path = os.path.join(result, relative_path)
def calculate_embeddings():
df = pd.read_csv(
"C:\\Users\\bouta\\OneDrive\\Bureau\\hackathons\\nextGenAI\\data\\data.csv")
embeds = co.embed(texts=list(
df['job_description']), model='embed-english-v2.0').embeddings
all_embeddings = np.array(embeds)
np.save(full_path, all_embeddings)
def embed_text(texts):
embeddings = co.embed(
model='embed-english-v2.0',
texts=texts)
embeddings = np.array(embeddings.embeddings)
return embeddings
def get_similarity(target):
cosine_similarity_list = []
all_embeddings = np.load(
"C:\\Users\\bouta\\OneDrive\\Bureau\\hackathons\\nextGenAI\\data\\embeddings.npy")
for i in range(len(all_embeddings)):
# Calculate the dot product of the two arrays
dot_product = np.dot(target, all_embeddings[i])
# Calculate the norm of the two arrays
norm_embedding_for_recipe_test = np.linalg.norm(target)
norm_embedding = np.linalg.norm(all_embeddings[i])
# Calculate the cosine similarity
cosine_similarity = dot_product / \
(norm_embedding_for_recipe_test * norm_embedding)
cosine_similarity_list.append((i, np.array((cosine_similarity))[0]))
# Sort the list
cosine_similarity_list.sort(key=lambda x: x[1], reverse=True)
# Select the top 3 by ID
top_3 = [x for x in cosine_similarity_list[:3]]
return top_3
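# Illustrative vectorized alternative to the loop above, using the cosine_similarity already
# imported from sklearn in this module. `target` is assumed to have shape (1, dim) as returned by
# embed_text(), and `all_embeddings` shape (n, dim).
def _example_top_k_vectorized(target, all_embeddings, k=3):
    sims = cosine_similarity(target, all_embeddings)[0] # shape (n,)
    top_idx = np.argsort(sims)[::-1][:k]
    return [(int(i), float(sims[i])) for i in top_idx]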
def main():
# calculate_embeddings()
target = ["Computer Science Engineering,French,English,Windows,Linux,Oracle,MySQL,PostgreSQL,MongoDB,SQLITE3,Python,Java,Javascript,c/c++,php,HTML,CSS,JS,React,Vuejs,Django,Flask,Node js,Expressjs,Laravel,Docker,Kubernetes,Git,VRP,IOT,biometric,attendency detection,web-sockets,arduino,M2M Services,El Jadida,Python,html,css,javascript,Kubernetes,Docker,CI/CD,Tekton CI/CD,Laravel,Vue.js,functional requirements"]
print(get_similarity(embed_text(target)))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | Salah-boutayeb/genAI | src~utils~resumeClassificationGPT.py |
import ast
import os
import fitz
import openai
from dotenv import load_dotenv
load_dotenv()
key = os.getenv('openai_key')
openai.api_key = key
def extract_text_from_pdf(pdf_file):
with fitz.open(pdf_file) as pdf_document:
text = ""
for page_num in range(pdf_document.page_count):
page = pdf_document[page_num]
text += page.get_text()
return text
def generate_data_prompt(resume_text):
    # Build the few-shot extraction prompt from the resume text
prompt = """
Write Knowledge with the same form.
Input: "+212 648265022 - HTML / CSS / JAVASCRIPT - Hackathon H-LSID : 2nd Place < ENSET Mohammedia > [email protected] linkedin.com/in /badrjarmouni EDUCATION HACKATHONS SKILLS CONTACT EXPERIENCE Dadupa Consulting: 4 months (PFA) << Social Network >>: Connecting project holders with investors/mentors ( ReactJs , Lumen ) - 2021/2022 INTERNSHIPS: JARMOUNI - ReactJS / NodeJS /ExpressJS - PHP < Laravel,Lumen > - Mongodb / Firebase / MySQL - Java < JEE , SpringBoot > - Flutter - Python < Flask > - Swift / C / C++ - Hackathon HackTheFuture : 3rd Place < ENSA Marrakech > DEUST: Mathematics,Computer Science,Physics < MIP > 2018 - 2020 LST (Bachelor 's Degree): Computer Science Engineering 2020 - 2021 License en Sciences et Techniques MST (Master's Degree): Networks and Computer Systems < RSI > 2021 - 2023 Master en Sciences et Techniques FST SETTAT - FST SETTAT - FST SETTAT - - SCRUM National Innovation Comp\\\\u00e9tition for Entrepreneurship IT cell member EXTRACURRICULAR: PROJECTS: DroneWay: Planning optimal paths for drone deliveries <Python, Flask, VueJS> Design and creation of a desktop application for the management of internships in companies <Java> Creation of a remote control system for domestic devices <Nodemcu> Mobile Application (Quizz) <Flutter, Firebase> LANGUAGE SKILLS English: French: Felicity Optics: 3 months (PFE) Design and creation of an e-commerce website for optics and eyewear Laravel - 2020/2021 Design and creation of the platform ALTWAY that automates the recruitment process (RH) <SpringBoot, React> Design and creation of NICE website National Innovation Competition for Entrepreneurship - React , Django - "
Knowledge: ["SCRUM","PHP","Laravel","ReactJS","NodeJS","ExpressJS","HTML","CSS","JAVASCRIPT","Mongodb","Firebase","MySQL","JEE","SpringBoot","Flutter","Python","Swift","English","French","Networks and Computer Systems","Computer Science Engineering"]
Input: "SKILLS Languages: Arabic, French, Anglais. OS: Windows, UNIX. Modeling & Databases UML, SQL Server, Oracle, Firebase. Electronic: Proteus ISIS & ARES, Arduino, MPLAB. Frontend: HTML, CSS, Bootstrap, React JS, Angular, Vue JS. Programming Languages & Framework: Flutter, Swift, JAVA, JEE, Spring, PHP, C/C++, Assembleur, JavaScript, Node JS, Python, Tensor Flow. PARA-UNIVERSITY ACTIVITIES & EXPERIENCES NICE: Vice president 24th Promotion Award Ceremony: Member of the committee GDSC UH1: Design Lead 21\\\\u2019 (Core team) FST Settat 21\\\\u2019 integration week: Media & design manager CTDE: Design manager ENACTUS FST Settat , CLIC , JLM: Member INTERESTS Reading chess Design Swimming Certi\\\\ufb01cates Udemy, Google AI, Coursera\\\\u2026 Certi\\\\ufb01cates (link) EDUCATION FACULTY OF SCIENCE AND TECHNOLOGY SETTAT - UH1 (FSTS) \\\\u2794 Engineering Degree: Computer Engineering, option: software quality. \\\\u2794 University degree in electrical engineering and mechanical engineering. HIGHER TECHNICIAN CERTIFICATE CENTER SETTAT (BTS) \\\\u2794 Higher technician diploma: Electronic Systems. TECHNICAL QUALIFIER HIGH SCHOOL SETTAT (LQTS) \\\\u2794 Baccalaureate: Science and electrical technology. PROFESSIONAL EXPERIENCES Capgemini Engineering : Application Internship \\\\u2794 Creating an application to automate the generation of ALTIS/ISS monitoring dashboards. \\\\u2794 Technologies used: Python, SQLite SMART CAR CONNECT: End of year Internship \\\\u2794 Designing and developing the OTOMOTO marketplace (platform for the sale of car spare parts). \\\\u2794 Technologies used: React JS, Node JS INTELLCAP: Application Internship \\\\u2794 Developing algorithms for predicting and optimizing kerosene consumption in aviation using AI. \\\\u2794 Technologies used: Python, Deep learning PROFESSIONAL PROJECTS DroneWay (Optimization of drone routes): Vue.JS, Python, Flask, VRP Cluedo game (Web): JEE, HTML, Bootstrap, JS, MySQL Autodiam (Mobile App for RAC detection): FLutter, Python, \\\\ufb01rebase, Flask Data Centre supervision (IOT): NodeRed, Node JS, Web sockets, C Nerds (Mobile app for general knowledge in IT): FLutter, \\\\ufb01rebase, Python Young PLASTE (youngplaste.me): UX, UI, CSS, JS Data visualisation (Web): Oracle (PLSQL), Python, Flask, HTML, Tailwind CSS Student management (Mobile): SWIFT, Core Data Item management (Web): JEE, HTML, CSS Chat App (Web client/serveur): C, Web sockets, JS, HTML Management of internships in companies (Desktop): JAVA, SWING, MySQL Recommendation system: Python, factor analysis (ACP, AFC) Remote control (IOT): NODEMCU, HTTP, HTML, CSS Smart home (IOT): Raspberry Pi, NODEMCU, NodeRed, MQTT hotel management (Web): v1.0 -> SQL Server (TSQL), C | v2.0 -> HTML, CSS, ASP, VB COMPETITIONS & ACHIEVEMENTS Program: Explorer Innovation Fund (UM6P & MIT Sandbox) Top 10 projects: Cluster Maintenance 4.0 at STARTGATE 3rd place: Hackathon HTF 2022 at ENSA Marrakech Participation: Hult Prize Regional summit online at Lisbonne 2nd prize: Hackathon LSID 2022 at ENSET Mohammedia Participation: Entrepreneur FAIR 2022 competition at ENSA Berrechid 1st prize: Hult Prize on campus 2022 at Cit\\\\u00e9 d\\\\u2019innovation UH1 1st prize: The pitch competition (5th Edition) at ENCG Settat Incubation program: U-founders at UM6P Participation: Let\\\\u2019s challenge 6.0 at EST Beni-Mellal Presentation of Hassan 1st University in the \\\\u201cFABLAB universitaire\\\\u201d at USMS Mohamed LAHARRAF Software engineering student Mobile: +212 695 587 968 Address: Casablanca-Settat, 
Morocco Email: [email protected] LinkedIn: Mohamed LAHARRAF (link) GitHub: github.com/mohamedlaharraf (link) Portfolio: mohamedlaharraf.engineer 2018 - 2023 2020 - 2021 2017 - 2018 Casablanca, Aug. - Oct. 2022 RABAT, July - Sept. 2022 RABAT, July - Sept. 2021 Sept. 2022 July 2022 May 2022 May 2022 April 2022 April 2022 March 2022 March 2022 Feb - Sept. 2022 Nov. 2021 "
Knowledge: ["Arabic","French","English","Windows","UNIX","UML","SQL Server","Oracle","Firebase","Flutter","Swift","JAVA","JEE","Spring","PHP","C/C++","Assembleur","JavaScript","Node JS","Python","Tensor Flow","Python","SQLite","React JS","Node JS","Engineering Degree","Computer Engineering","software quality","Electronic Systems"]
Input: "Realization of a remote control system for domestic appliances with Nodemcu Creation of the website NICE National Innovation Competition for Entrepreneurship - React , Django - 2nd place hackathon H-LSID ENSET Mohammedia linkedin.com/in/beraichimane/ Quiz : Mobile Application - Flutter , NodeJs - CERTIFICATIONS English : Fluent Arabic : Native French : TCF B2 LANGUAGES EXPERIENCES Internships Para-academics: Projects Geo4net / Web Developer ( 7 mois ) Smart Car Connect / Web Developer ( 2 mois ) Club BEEA / FST Settat Club Ihssen / FST Settat Design and construction of the platform KHASNI to connect between a service provider and a service requester - Spring , Angular - Design and creation of a vehicle\\\\u2019s spare parts marketplace Design and construction of the platform INTERVERSITY to connect between students and companies - Spring , Angular - 2018 - 2020 Hackathons Treasurer Head of Media Unit / Design cell member DroneWay : Planning of optimal routes for deliveries by an autonomous drone - Flask, VueJs, Python - Design and production of a desktop application for the management of internships in company - Java Swing - 3rd place hackathon HTFH ENSA Marrakech ( Juillet - Septembre ) 2022 Implementation of a real-time geolocation web application and vehicle feet management ( Juin - Decembre ) 2021 2018 - 2019 HTML / CSS / JAVASCRIPT / TYPE SCRIPT C / C++ / SWIFT PHP / JAVA / JAVA EE / SPRING DART / FLUTTER PYTHON / FLASK / DJANGO HUAWEI : HCSA STORAGE GOOGLE : PROJECT MANAGEMENT OPENCLASSROOMS: JAVA EE ANGULAR / REACT JS SKILLS Master : Network and Computer systems Hassan First University I FST Settat LST : Computer Engineering Hassan First University I FST Settat Deust : Mathematics, Computer Science, Physics Hassan First University I FST Settat Baccalaureate : Physics High school Tarik Ibnu Ziad I Meknes EDUCATION 2021 - 2023 2020 - 2021 2017 - 2020 2016 - 2017 CONTACT [email protected] +212 708-038348 BERAICH Imane Student in 2nd year Master RSI "
Knowledge: ["Network and Computer systems","Computer Engineering","English","French","Arabic","Flask","VueJs","Python","Spring","Angular","Nodemcu","Java","Swing","React","Django","Flutter","NodeJs","HTML","CSS","JAVASCRIPT","TYPE SCRIPT"]
Input: "Engineer's degree in Software Engineering FST Settat - Engineering Degree 2020-2023 University Diploma of Technology in Multi\\\\ufffd media and Web Design ENS T\\\\u00e9touan - DUT 2018-2020 Education - DroneWay: Planning of optimal routes for deliveries by an autonomous drone (Flask, VueJs, Python). - Intelligent patient monitoring system (D jango,ML,Raspberry Pi). - Cluedo: Multiplayer game (Spring,Web Sockets). - AjiTechri: E-commerce (Java EE). - Movie recommendation system (Flask, TensorFlow) - Drowsiness detection (Raspberry, Machine Learning). - ... Projects: Club Enactus: Project Manager. Club GDSC (Google Developer Student Clubs): Head of WEB development unit. 08/2021-08/2022 Para-academics: Hirschmann Automative: program- ming of an ERP web application using php. 07/2019-08/2019 (2 months) SHM: developed a web application using Laravel for the management of the different sectors of associations/ companies. 04/2020-07/2020 (4 months) Luova Tech: development of showcase websites using Wordpress, and web applications with Angular for different clients. 08/2021-09/2021 (2 months) Dadupa: creation of a platform for connecting project holders and inves- tors / business angels (React, Lumen and Docker). 07/2022-09/2022 (3 months) Internships: Work experience Hackathons - 2nd place hackathon H-LSID ENSET Mohammedia. - 3rd place hackathon HTFH ENSA Marrakech. - 1st place hackathon BCG Platinion (National and International). Certi\\\\ufb01cations - Huawei: HCIA Cloud Computing - Huawei: HCIA Big Data - The Hong Kong University of Science and Technology: Angular - Google: Project Management Specialization - OpenClassrooms: Java EE - Scrimba: Frontend Developper Career Path - Google\\\\u2019s Qwiklabs: Cloud Solutions Skills - HTML5 / CSS3 / JavaScript(ES6+) - PHP (Laravel / Wordpress) - Java (JEE, Spring) - Python (Flask / D jango) - Machine Learning (TensorFlow) - Angular / React - Firebase / MySQL - Flutter / React Native - Docker / Kubernetes - JIRA [email protected] www.anasbouabid.com linkedin.com/in/anas-bouabid +212 645 23 77 40 ANAS BOUABID 5th year, Software Engineering Student "
Knowledge: ["Software Engineering","Multi-media and Web Design","Flask","VueJs","Python","Django","ML","Raspberry Pi","Spring","Web Sockets","Java EE","TensorFlow","Enactus","Google Developer Student Clubs","PHP","Laravel","Angular","Wordpress","Docker","Lumen","HTML5","CSS3","JavaScript","ES6","JEE","Spring","Firebase","MySQL","Flutter","React Native","Kubernetes","JIRA","HCIA Cloud Computing","HCIA Big Data","Angular","Project Management","Java EE","Frontend Developer","Cloud Solutions"]
Input: """ + resume_text.replace('\\\\n', ' ') + """
Knowledge: """
messages = [{"role": "system", "content": "you should behave like human resources specialist"},
{"role": "system", "content": prompt}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
)
result = ast.literal_eval(response["choices"][0]["message"]["content"])
return ",".join(result)
def main():
# Get the path to the resume PDF and job description text file
# resume_pdf_path = input("Enter the path to the resume PDF file: ")
resume_text = extract_text_from_pdf(
"C:\\Users\\bouta\\OneDrive\\Bureau\\hackathons\\nextGenAI\\data\\SalahEddine-BOUTAYEB_RESUME.pdf")
extracted_data = generate_data_prompt(resume_text)
print(extracted_data)
if __name__ == "__main__":
main()
| [
" ",
"\n Write Knowledge with the same form.\n\n Input: \"+212 648265022 - HTML / CSS / JAVASCRIPT - Hackathon H-LSID : 2nd Place < ENSET Mohammedia > [email protected] linkedin.com/in /badrjarmouni EDUCATION HACKATHONS SKILLS CONTACT EXPERIENCE Dadupa Consulting: 4 months (PFA) << Social Network >>: Connecting project holders with investors/mentors ( ReactJs , Lumen ) - 2021/2022 INTERNSHIPS: JARMOUNI - ReactJS / NodeJS /ExpressJS - PHP < Laravel,Lumen > - Mongodb / Firebase / MySQL - Java < JEE , SpringBoot > - Flutter - Python < Flask > - Swift / C / C++ - Hackathon HackTheFuture : 3rd Place < ENSA Marrakech > DEUST: Mathematics,Computer Science,Physics < MIP > 2018 - 2020 LST (Bachelor 's Degree): Computer Science Engineering 2020 - 2021 License en Sciences et Techniques MST (Master's Degree): Networks and Computer Systems < RSI > 2021 - 2023 Master en Sciences et Techniques FST SETTAT - FST SETTAT - FST SETTAT - - SCRUM National Innovation Comp\\\\u00e9tition for Entrepreneurship IT cell member EXTRACURRICULAR: PROJECTS: DroneWay: Planning optimal paths for drone deliveries <Python, Flask, VueJS> Design and creation of a desktop application for the management of internships in companies <Java> Creation of a remote control system for domestic devices <Nodemcu> Mobile Application (Quizz) <Flutter, Firebase> LANGUAGE SKILLS English: French: Felicity Optics: 3 months (PFE) Design and creation of an e-commerce website for optics and eyewear Laravel - 2020/2021 Design and creation of the platform ALTWAY that automates the recruitment process (RH) <SpringBoot, React> Design and creation of NICE website National Innovation Competition for Entrepreneurship - React , Django - \"\n Knowledge: [\"SCRUM\",\"PHP\",\"Laravel\",\"ReactJS\",\"NodeJS\",\"ExpressJS\",\"HTML\",\"CSS\",\"JAVASCRIPT\",\"Mongodb\",\"Firebase\",\"MySQL\",\"JEE\",\"SpringBoot\",\"Flutter\",\"Python\",\"Swift\",\"English\",\"French\",\"Networks and Computer Systems\",\"Computer Science Engineering\"]\n\n Input: \"SKILLS Languages: Arabic, French, Anglais. OS: Windows, UNIX. Modeling & Databases UML, SQL Server, Oracle, Firebase. Electronic: Proteus ISIS & ARES, Arduino, MPLAB. Frontend: HTML, CSS, Bootstrap, React JS, Angular, Vue JS. Programming Languages & Framework: Flutter, Swift, JAVA, JEE, Spring, PHP, C/C++, Assembleur, JavaScript, Node JS, Python, Tensor Flow. PARA-UNIVERSITY ACTIVITIES & EXPERIENCES NICE: Vice president 24th Promotion Award Ceremony: Member of the committee GDSC UH1: Design Lead 21\\\\u2019 (Core team) FST Settat 21\\\\u2019 integration week: Media & design manager CTDE: Design manager ENACTUS FST Settat , CLIC , JLM: Member INTERESTS Reading chess Design Swimming Certi\\\\ufb01cates Udemy, Google AI, Coursera\\\\u2026 Certi\\\\ufb01cates (link) EDUCATION FACULTY OF SCIENCE AND TECHNOLOGY SETTAT - UH1 (FSTS) \\\\u2794 Engineering Degree: Computer Engineering, option: software quality. \\\\u2794 University degree in electrical engineering and mechanical engineering. HIGHER TECHNICIAN CERTIFICATE CENTER SETTAT (BTS) \\\\u2794 Higher technician diploma: Electronic Systems. TECHNICAL QUALIFIER HIGH SCHOOL SETTAT (LQTS) \\\\u2794 Baccalaureate: Science and electrical technology. PROFESSIONAL EXPERIENCES Capgemini Engineering : Application Internship \\\\u2794 Creating an application to automate the generation of ALTIS/ISS monitoring dashboards. 
\\\\u2794 Technologies used: Python, SQLite SMART CAR CONNECT: End of year Internship \\\\u2794 Designing and developing the OTOMOTO marketplace (platform for the sale of car spare parts). \\\\u2794 Technologies used: React JS, Node JS INTELLCAP: Application Internship \\\\u2794 Developing algorithms for predicting and optimizing kerosene consumption in aviation using AI. \\\\u2794 Technologies used: Python, Deep learning PROFESSIONAL PROJECTS DroneWay (Optimization of drone routes): Vue.JS, Python, Flask, VRP Cluedo game (Web): JEE, HTML, Bootstrap, JS, MySQL Autodiam (Mobile App for RAC detection): FLutter, Python, \\\\ufb01rebase, Flask Data Centre supervision (IOT): NodeRed, Node JS, Web sockets, C Nerds (Mobile app for general knowledge in IT): FLutter, \\\\ufb01rebase, Python Young PLASTE (youngplaste.me): UX, UI, CSS, JS Data visualisation (Web): Oracle (PLSQL), Python, Flask, HTML, Tailwind CSS Student management (Mobile): SWIFT, Core Data Item management (Web): JEE, HTML, CSS Chat App (Web client/serveur): C, Web sockets, JS, HTML Management of internships in companies (Desktop): JAVA, SWING, MySQL Recommendation system: Python, factor analysis (ACP, AFC) Remote control (IOT): NODEMCU, HTTP, HTML, CSS Smart home (IOT): Raspberry Pi, NODEMCU, NodeRed, MQTT hotel management (Web): v1.0 -> SQL Server (TSQL), C | v2.0 -> HTML, CSS, ASP, VB COMPETITIONS & ACHIEVEMENTS Program: Explorer Innovation Fund (UM6P & MIT Sandbox) Top 10 projects: Cluster Maintenance 4.0 at STARTGATE 3rd place: Hackathon HTF 2022 at ENSA Marrakech Participation: Hult Prize Regional summit online at Lisbonne 2nd prize: Hackathon LSID 2022 at ENSET Mohammedia Participation: Entrepreneur FAIR 2022 competition at ENSA Berrechid 1st prize: Hult Prize on campus 2022 at Cit\\\\u00e9 d\\\\u2019innovation UH1 1st prize: The pitch competition (5th Edition) at ENCG Settat Incubation program: U-founders at UM6P Participation: Let\\\\u2019s challenge 6.0 at EST Beni-Mellal Presentation of Hassan 1st University in the \\\\u201cFABLAB universitaire\\\\u201d at USMS Mohamed LAHARRAF Software engineering student Mobile: +212 695 587 968 Address: Casablanca-Settat, Morocco Email: [email protected] LinkedIn: Mohamed LAHARRAF (link) GitHub: github.com/mohamedlaharraf (link) Portfolio: mohamedlaharraf.engineer 2018 - 2023 2020 - 2021 2017 - 2018 Casablanca, Aug. - Oct. 2022 RABAT, July - Sept. 2022 RABAT, July - Sept. 2021 Sept. 2022 July 2022 May 2022 May 2022 April 2022 April 2022 March 2022 March 2022 Feb - Sept. 2022 Nov. 
2021 \"\n Knowledge: [\"Arabic\",\"French\",\"English\",\"Windows\",\"UNIX\",\"UML\",\"SQL Server\",\"Oracle\",\"Firebase\",\"Flutter\",\"Swift\",\"JAVA\",\"JEE\",\"Spring\",\"PHP\",\"C/C++\",\"Assembleur\",\"JavaScript\",\"Node JS\",\"Python\",\"Tensor Flow\",\"Python\",\"SQLite\",\"React JS\",\"Node JS\",\"Engineering Degree\",\"Computer Engineering\",\"software quality\",\"Electronic Systems\"]\n\n Input: \"Realization of a remote control system for domestic appliances with Nodemcu Creation of the website NICE National Innovation Competition for Entrepreneurship - React , Django - 2nd place hackathon H-LSID ENSET Mohammedia linkedin.com/in/beraichimane/ Quiz : Mobile Application - Flutter , NodeJs - CERTIFICATIONS English : Fluent Arabic : Native French : TCF B2 LANGUAGES EXPERIENCES Internships Para-academics: Projects Geo4net / Web Developer ( 7 mois ) Smart Car Connect / Web Developer ( 2 mois ) Club BEEA / FST Settat Club Ihssen / FST Settat Design and construction of the platform KHASNI to connect between a service provider and a service requester - Spring , Angular - Design and creation of a vehicle\\\\u2019s spare parts marketplace Design and construction of the platform INTERVERSITY to connect between students and companies - Spring , Angular - 2018 - 2020 Hackathons Treasurer Head of Media Unit / Design cell member DroneWay : Planning of optimal routes for deliveries by an autonomous drone - Flask, VueJs, Python - Design and production of a desktop application for the management of internships in company - Java Swing - 3rd place hackathon HTFH ENSA Marrakech ( Juillet - Septembre ) 2022 Implementation of a real-time geolocation web application and vehicle feet management ( Juin - Decembre ) 2021 2018 - 2019 HTML / CSS / JAVASCRIPT / TYPE SCRIPT C / C++ / SWIFT PHP / JAVA / JAVA EE / SPRING DART / FLUTTER PYTHON / FLASK / DJANGO HUAWEI : HCSA STORAGE GOOGLE : PROJECT MANAGEMENT OPENCLASSROOMS: JAVA EE ANGULAR / REACT JS SKILLS Master : Network and Computer systems Hassan First University I FST Settat LST : Computer Engineering Hassan First University I FST Settat Deust : Mathematics, Computer Science, Physics Hassan First University I FST Settat Baccalaureate : Physics High school Tarik Ibnu Ziad I Meknes EDUCATION 2021 - 2023 2020 - 2021 2017 - 2020 2016 - 2017 CONTACT [email protected] +212 708-038348 BERAICH Imane Student in 2nd year Master RSI \"\n Knowledge: [\"Network and Computer systems\",\"Computer Engineering\",\"English\",\"French\",\"Arabic\",\"Flask\",\"VueJs\",\"Python\",\"Spring\",\"Angular\",\"Nodemcu\",\"Java\",\"Swing\",\"React\",\"Django\",\"Flutter\",\"NodeJs\",\"HTML\",\"CSS\",\"JAVASCRIPT\",\"TYPE SCRIPT\"]\n\n Input: \"Engineer's degree in Software Engineering FST Settat - Engineering Degree 2020-2023 University Diploma of Technology in Multi\\\\ufffd media and Web Design ENS T\\\\u00e9touan - DUT 2018-2020 Education - DroneWay: Planning of optimal routes for deliveries by an autonomous drone (Flask, VueJs, Python). - Intelligent patient monitoring system (D jango,ML,Raspberry Pi). - Cluedo: Multiplayer game (Spring,Web Sockets). - AjiTechri: E-commerce (Java EE). - Movie recommendation system (Flask, TensorFlow) - Drowsiness detection (Raspberry, Machine Learning). - ... Projects: Club Enactus: Project Manager. Club GDSC (Google Developer Student Clubs): Head of WEB development unit. 08/2021-08/2022 Para-academics: Hirschmann Automative: program- ming of an ERP web application using php. 
07/2019-08/2019 (2 months) SHM: developed a web application using Laravel for the management of the different sectors of associations/ companies. 04/2020-07/2020 (4 months) Luova Tech: development of showcase websites using Wordpress, and web applications with Angular for different clients. 08/2021-09/2021 (2 months) Dadupa: creation of a platform for connecting project holders and inves- tors / business angels (React, Lumen and Docker). 07/2022-09/2022 (3 months) Internships: Work experience Hackathons - 2nd place hackathon H-LSID ENSET Mohammedia. - 3rd place hackathon HTFH ENSA Marrakech. - 1st place hackathon BCG Platinion (National and International). Certi\\\\ufb01cations - Huawei: HCIA Cloud Computing - Huawei: HCIA Big Data - The Hong Kong University of Science and Technology: Angular - Google: Project Management Specialization - OpenClassrooms: Java EE - Scrimba: Frontend Developper Career Path - Google\\\\u2019s Qwiklabs: Cloud Solutions Skills - HTML5 / CSS3 / JavaScript(ES6+) - PHP (Laravel / Wordpress) - Java (JEE, Spring) - Python (Flask / D jango) - Machine Learning (TensorFlow) - Angular / React - Firebase / MySQL - Flutter / React Native - Docker / Kubernetes - JIRA [email protected] www.anasbouabid.com linkedin.com/in/anas-bouabid +212 645 23 77 40 ANAS BOUABID 5th year, Software Engineering Student \"\n Knowledge: [\"Software Engineering\",\"Multi-media and Web Design\",\"Flask\",\"VueJs\",\"Python\",\"Django\",\"ML\",\"Raspberry Pi\",\"Spring\",\"Web Sockets\",\"Java EE\",\"TensorFlow\",\"Enactus\",\"Google Developer Student Clubs\",\"PHP\",\"Laravel\",\"Angular\",\"Wordpress\",\"Docker\",\"Lumen\",\"HTML5\",\"CSS3\",\"JavaScript\",\"ES6\",\"JEE\",\"Spring\",\"Firebase\",\"MySQL\",\"Flutter\",\"React Native\",\"Kubernetes\",\"JIRA\",\"HCIA Cloud Computing\",\"HCIA Big Data\",\"Angular\",\"Project Management\",\"Java EE\",\"Frontend Developer\",\"Cloud Solutions\"]\n\n Input: ",
"\\\\n",
"\n Knowledge: ",
"\n Write Knowledge with the same form.\n\n Input: \"+212 648265022 - HTML / CSS / JAVASCRIPT - Hackathon H-LSID : 2nd Place < ENSET Mohammedia > [email protected] linkedin.com/in /badrjarmouni EDUCATION HACKATHONS SKILLS CONTACT EXPERIENCE Dadupa Consulting: 4 months (PFA) << Social Network >>: Connecting project holders with investors/mentors ( ReactJs , Lumen ) - 2021/2022 INTERNSHIPS: JARMOUNI - ReactJS / NodeJS /ExpressJS - PHP < Laravel,Lumen > - Mongodb / Firebase / MySQL - Java < JEE , SpringBoot > - Flutter - Python < Flask > - Swift / C / C++ - Hackathon HackTheFuture : 3rd Place < ENSA Marrakech > DEUST: Mathematics,Computer Science,Physics < MIP > 2018 - 2020 LST (Bachelor 's Degree): Computer Science Engineering 2020 - 2021 License en Sciences et Techniques MST (Master's Degree): Networks and Computer Systems < RSI > 2021 - 2023 Master en Sciences et Techniques FST SETTAT - FST SETTAT - FST SETTAT - - SCRUM National Innovation Comp\\\\\\\\u00e9tition for Entrepreneurship IT cell member EXTRACURRICULAR: PROJECTS: DroneWay: Planning optimal paths for drone deliveries <Python, Flask, VueJS> Design and creation of a desktop application for the management of internships in companies <Java> Creation of a remote control system for domestic devices <Nodemcu> Mobile Application (Quizz) <Flutter, Firebase> LANGUAGE SKILLS English: French: Felicity Optics: 3 months (PFE) Design and creation of an e-commerce website for optics and eyewear Laravel - 2020/2021 Design and creation of the platform ALTWAY that automates the recruitment process (RH) <SpringBoot, React> Design and creation of NICE website National Innovation Competition for Entrepreneurship - React , Django - \"\n Knowledge: [\"SCRUM\",\"PHP\",\"Laravel\",\"ReactJS\",\"NodeJS\",\"ExpressJS\",\"HTML\",\"CSS\",\"JAVASCRIPT\",\"Mongodb\",\"Firebase\",\"MySQL\",\"JEE\",\"SpringBoot\",\"Flutter\",\"Python\",\"Swift\",\"English\",\"French\",\"Networks and Computer Systems\",\"Computer Science Engineering\"]\n\n Input: \"SKILLS Languages: Arabic, French, Anglais. OS: Windows, UNIX. Modeling & Databases UML, SQL Server, Oracle, Firebase. Electronic: Proteus ISIS & ARES, Arduino, MPLAB. Frontend: HTML, CSS, Bootstrap, React JS, Angular, Vue JS. Programming Languages & Framework: Flutter, Swift, JAVA, JEE, Spring, PHP, C/C++, Assembleur, JavaScript, Node JS, Python, Tensor Flow. PARA-UNIVERSITY ACTIVITIES & EXPERIENCES NICE: Vice president 24th Promotion Award Ceremony: Member of the committee GDSC UH1: Design Lead 21\\\\\\\\u2019 (Core team) FST Settat 21\\\\\\\\u2019 integration week: Media & design manager CTDE: Design manager ENACTUS FST Settat , CLIC , JLM: Member INTERESTS Reading chess Design Swimming Certi\\\\\\\\ufb01cates Udemy, Google AI, Coursera\\\\\\\\u2026 Certi\\\\\\\\ufb01cates (link) EDUCATION FACULTY OF SCIENCE AND TECHNOLOGY SETTAT - UH1 (FSTS) \\\\\\\\u2794 Engineering Degree: Computer Engineering, option: software quality. \\\\\\\\u2794 University degree in electrical engineering and mechanical engineering. HIGHER TECHNICIAN CERTIFICATE CENTER SETTAT (BTS) \\\\\\\\u2794 Higher technician diploma: Electronic Systems. TECHNICAL QUALIFIER HIGH SCHOOL SETTAT (LQTS) \\\\\\\\u2794 Baccalaureate: Science and electrical technology. PROFESSIONAL EXPERIENCES Capgemini Engineering : Application Internship \\\\\\\\u2794 Creating an application to automate the generation of ALTIS/ISS monitoring dashboards. 
\\\\\\\\u2794 Technologies used: Python, SQLite SMART CAR CONNECT: End of year Internship \\\\\\\\u2794 Designing and developing the OTOMOTO marketplace (platform for the sale of car spare parts). \\\\\\\\u2794 Technologies used: React JS, Node JS INTELLCAP: Application Internship \\\\\\\\u2794 Developing algorithms for predicting and optimizing kerosene consumption in aviation using AI. \\\\\\\\u2794 Technologies used: Python, Deep learning PROFESSIONAL PROJECTS DroneWay (Optimization of drone routes): Vue.JS, Python, Flask, VRP Cluedo game (Web): JEE, HTML, Bootstrap, JS, MySQL Autodiam (Mobile App for RAC detection): FLutter, Python, \\\\\\\\ufb01rebase, Flask Data Centre supervision (IOT): NodeRed, Node JS, Web sockets, C Nerds (Mobile app for general knowledge in IT): FLutter, \\\\\\\\ufb01rebase, Python Young PLASTE (youngplaste.me): UX, UI, CSS, JS Data visualisation (Web): Oracle (PLSQL), Python, Flask, HTML, Tailwind CSS Student management (Mobile): SWIFT, Core Data Item management (Web): JEE, HTML, CSS Chat App (Web client/serveur): C, Web sockets, JS, HTML Management of internships in companies (Desktop): JAVA, SWING, MySQL Recommendation system: Python, factor analysis (ACP, AFC) Remote control (IOT): NODEMCU, HTTP, HTML, CSS Smart home (IOT): Raspberry Pi, NODEMCU, NodeRed, MQTT hotel management (Web): v1.0 -> SQL Server (TSQL), C | v2.0 -> HTML, CSS, ASP, VB COMPETITIONS & ACHIEVEMENTS Program: Explorer Innovation Fund (UM6P & MIT Sandbox) Top 10 projects: Cluster Maintenance 4.0 at STARTGATE 3rd place: Hackathon HTF 2022 at ENSA Marrakech Participation: Hult Prize Regional summit online at Lisbonne 2nd prize: Hackathon LSID 2022 at ENSET Mohammedia Participation: Entrepreneur FAIR 2022 competition at ENSA Berrechid 1st prize: Hult Prize on campus 2022 at Cit\\\\\\\\u00e9 d\\\\\\\\u2019innovation UH1 1st prize: The pitch competition (5th Edition) at ENCG Settat Incubation program: U-founders at UM6P Participation: Let\\\\\\\\u2019s challenge 6.0 at EST Beni-Mellal Presentation of Hassan 1st University in the \\\\\\\\u201cFABLAB universitaire\\\\\\\\u201d at USMS Mohamed LAHARRAF Software engineering student Mobile: +212 695 587 968 Address: Casablanca-Settat, Morocco Email: [email protected] LinkedIn: Mohamed LAHARRAF (link) GitHub: github.com/mohamedlaharraf (link) Portfolio: mohamedlaharraf.engineer 2018 - 2023 2020 - 2021 2017 - 2018 Casablanca, Aug. - Oct. 2022 RABAT, July - Sept. 2022 RABAT, July - Sept. 2021 Sept. 2022 July 2022 May 2022 May 2022 April 2022 April 2022 March 2022 March 2022 Feb - Sept. 2022 Nov. 
2021 \"\n Knowledge: [\"Arabic\",\"French\",\"English\",\"Windows\",\"UNIX\",\"UML\",\"SQL Server\",\"Oracle\",\"Firebase\",\"Flutter\",\"Swift\",\"JAVA\",\"JEE\",\"Spring\",\"PHP\",\"C/C++\",\"Assembleur\",\"JavaScript\",\"Node JS\",\"Python\",\"Tensor Flow\",\"Python\",\"SQLite\",\"React JS\",\"Node JS\",\"Engineering Degree\",\"Computer Engineering\",\"software quality\",\"Electronic Systems\"]\n\n Input: \"Realization of a remote control system for domestic appliances with Nodemcu Creation of the website NICE National Innovation Competition for Entrepreneurship - React , Django - 2nd place hackathon H-LSID ENSET Mohammedia linkedin.com/in/beraichimane/ Quiz : Mobile Application - Flutter , NodeJs - CERTIFICATIONS English : Fluent Arabic : Native French : TCF B2 LANGUAGES EXPERIENCES Internships Para-academics: Projects Geo4net / Web Developer ( 7 mois ) Smart Car Connect / Web Developer ( 2 mois ) Club BEEA / FST Settat Club Ihssen / FST Settat Design and construction of the platform KHASNI to connect between a service provider and a service requester - Spring , Angular - Design and creation of a vehicle\\\\\\\\u2019s spare parts marketplace Design and construction of the platform INTERVERSITY to connect between students and companies - Spring , Angular - 2018 - 2020 Hackathons Treasurer Head of Media Unit / Design cell member DroneWay : Planning of optimal routes for deliveries by an autonomous drone - Flask, VueJs, Python - Design and production of a desktop application for the management of internships in company - Java Swing - 3rd place hackathon HTFH ENSA Marrakech ( Juillet - Septembre ) 2022 Implementation of a real-time geolocation web application and vehicle feet management ( Juin - Decembre ) 2021 2018 - 2019 HTML / CSS / JAVASCRIPT / TYPE SCRIPT C / C++ / SWIFT PHP / JAVA / JAVA EE / SPRING DART / FLUTTER PYTHON / FLASK / DJANGO HUAWEI : HCSA STORAGE GOOGLE : PROJECT MANAGEMENT OPENCLASSROOMS: JAVA EE ANGULAR / REACT JS SKILLS Master : Network and Computer systems Hassan First University I FST Settat LST : Computer Engineering Hassan First University I FST Settat Deust : Mathematics, Computer Science, Physics Hassan First University I FST Settat Baccalaureate : Physics High school Tarik Ibnu Ziad I Meknes EDUCATION 2021 - 2023 2020 - 2021 2017 - 2020 2016 - 2017 CONTACT [email protected] +212 708-038348 BERAICH Imane Student in 2nd year Master RSI \"\n Knowledge: [\"Network and Computer systems\",\"Computer Engineering\",\"English\",\"French\",\"Arabic\",\"Flask\",\"VueJs\",\"Python\",\"Spring\",\"Angular\",\"Nodemcu\",\"Java\",\"Swing\",\"React\",\"Django\",\"Flutter\",\"NodeJs\",\"HTML\",\"CSS\",\"JAVASCRIPT\",\"TYPE SCRIPT\"]\n\n Input: \"Engineer's degree in Software Engineering FST Settat - Engineering Degree 2020-2023 University Diploma of Technology in Multi\\\\\\\\ufffd media and Web Design ENS T\\\\\\\\u00e9touan - DUT 2018-2020 Education - DroneWay: Planning of optimal routes for deliveries by an autonomous drone (Flask, VueJs, Python). - Intelligent patient monitoring system (D jango,ML,Raspberry Pi). - Cluedo: Multiplayer game (Spring,Web Sockets). - AjiTechri: E-commerce (Java EE). - Movie recommendation system (Flask, TensorFlow) - Drowsiness detection (Raspberry, Machine Learning). - ... Projects: Club Enactus: Project Manager. Club GDSC (Google Developer Student Clubs): Head of WEB development unit. 08/2021-08/2022 Para-academics: Hirschmann Automative: program- ming of an ERP web application using php. 
07/2019-08/2019 (2 months) SHM: developed a web application using Laravel for the management of the different sectors of associations/ companies. 04/2020-07/2020 (4 months) Luova Tech: development of showcase websites using Wordpress, and web applications with Angular for different clients. 08/2021-09/2021 (2 months) Dadupa: creation of a platform for connecting project holders and inves- tors / business angels (React, Lumen and Docker). 07/2022-09/2022 (3 months) Internships: Work experience Hackathons - 2nd place hackathon H-LSID ENSET Mohammedia. - 3rd place hackathon HTFH ENSA Marrakech. - 1st place hackathon BCG Platinion (National and International). Certi\\\\\\\\ufb01cations - Huawei: HCIA Cloud Computing - Huawei: HCIA Big Data - The Hong Kong University of Science and Technology: Angular - Google: Project Management Specialization - OpenClassrooms: Java EE - Scrimba: Frontend Developper Career Path - Google\\\\\\\\u2019s Qwiklabs: Cloud Solutions Skills - HTML5 / CSS3 / JavaScript(ES6+) - PHP (Laravel / Wordpress) - Java (JEE, Spring) - Python (Flask / D jango) - Machine Learning (TensorFlow) - Angular / React - Firebase / MySQL - Flutter / React Native - Docker / Kubernetes - JIRA [email protected] www.anasbouabid.com linkedin.com/in/anas-bouabid +212 645 23 77 40 ANAS BOUABID 5th year, Software Engineering Student \"\n Knowledge: [\"Software Engineering\",\"Multi-media and Web Design\",\"Flask\",\"VueJs\",\"Python\",\"Django\",\"ML\",\"Raspberry Pi\",\"Spring\",\"Web Sockets\",\"Java EE\",\"TensorFlow\",\"Enactus\",\"Google Developer Student Clubs\",\"PHP\",\"Laravel\",\"Angular\",\"Wordpress\",\"Docker\",\"Lumen\",\"HTML5\",\"CSS3\",\"JavaScript\",\"ES6\",\"JEE\",\"Spring\",\"Firebase\",\"MySQL\",\"Flutter\",\"React Native\",\"Kubernetes\",\"JIRA\",\"HCIA Cloud Computing\",\"HCIA Big Data\",\"Angular\",\"Project Management\",\"Java EE\",\"Frontend Developer\",\"Cloud Solutions\"]\n\n Input: \"\"\" + resume_text.replace('\\\\\\\\n', ' ') + \"\"\"\n Knowledge: ",
"you should behave like human resources specialist"
] |
2024-01-10 | Salah-boutayeb/genAI | src~utils~embeddings.py | import cohere
import numpy as np
import re
import os
import pandas as pd
from tqdm import tqdm
from datasets import load_dataset
import umap
import altair as alt
from sklearn.metrics.pairwise import cosine_similarity
from annoy import AnnoyIndex
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_colwidth', None)
result = os.path.dirname(os.path.realpath('__file__'))
api_key = '22L1f26PPdIZVcoLeERyldwGIRVGXcUvqfaQNItT'
co = cohere.Client(api_key)
# Get dataset
df = pd.read_csv("../data/internships.csv")
embeds = co.embed(texts=list(df['job_description']),
model='embed-english-v2.0').embeddings
all_embeddings = np.array(embeds)
relative_path = "../data/embeddings.npy"
full_path = os.path.join(result, relative_path)
np.save(full_path, all_embeddings)
search_index = AnnoyIndex(np.array(embeds).shape[1], 'angular')
# Add all the vectors to the search index
for i in range(len(embeds)):
search_index.add_item(i, embeds[i])
search_index.build(3)
query = """Advocate for usability and consistency across the Firm’s products
Build Single Page Applications using SharePoint 2010/2013’s Client Object Model, SPServices, and jQuery
Create cutting edge mobile, web, and desktop interface designs that meet today’s evolving industry standards, business requirements, and are consistent with the KP brand
Oversee others’ designs and be able to critique and guide them
Collaborate and partner with taxonomists, engineers, and program management to develop an overarching UX strategy and implementation plan for Taxonomy Tools and Automation
Help visualize, communicate, and implement designs and assets
Bring strong, user-centered design skills and business knowledge to translate complex workflows into exceptional design solutions at scale"""
# Get the query's embedding
query_embed = co.embed(texts=[query], model="embed-english-v2.0").embeddings
# Retrieve the nearest neighbors
similar_item_ids = search_index.get_nns_by_vector(
query_embed[0], 10, include_distances=True)
# Format the results
results = pd.DataFrame(
data={'texts': df.iloc[similar_item_ids[0]]['job_description'], 'distance': similar_item_ids[1]})
print(f"Query:'{query}'\nNearest neighbors:")
print(results)
| [] |
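A small follow-up sketch for the script above: it only persists the raw embedding matrix to ../data/embeddings.npy and keeps the Annoy index in memory, so a later session has to rebuild the index before searching. The sketch below reloads the saved matrix and queries it without re-embedding the CSV; the placeholder API key, the example query, and the number of Annoy trees are illustrative assumptions, while the file paths and the embed-english-v2.0 model mirror the script.

import numpy as np
import pandas as pd
import cohere
from annoy import AnnoyIndex

co = cohere.Client("YOUR_COHERE_API_KEY")  # placeholder; reuse the key from the script above

# Reload the corpus and the embeddings saved by the script above
df = pd.read_csv("../data/internships.csv")
embeddings = np.load("../data/embeddings.npy")

# Rebuild the approximate nearest-neighbour index from the stored vectors
search_index = AnnoyIndex(embeddings.shape[1], "angular")
for i, vector in enumerate(embeddings):
    search_index.add_item(i, vector)
search_index.build(10)  # the number of trees is an illustrative choice

def search(query, top_k=5):
    """Embed the query with the same model and return the closest job descriptions."""
    query_embed = co.embed(texts=[query], model="embed-english-v2.0").embeddings[0]
    ids, distances = search_index.get_nns_by_vector(query_embed, top_k, include_distances=True)
    return pd.DataFrame({"job_description": df.iloc[ids]["job_description"], "distance": distances})

print(search("frontend developer internship with React and TypeScript"))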
2024-01-10 | junwankimm/AP_Gpu_Side | comm.py | import socket
import io
import numpy as np
import pickle
import configparser
import os
import torch
import numpy as np
import trimesh
import sys
import cv2
import shutil
from pathlib import Path
from pytorch3d.io import save_obj
from semantify.utils._3dmm_utils import ThreeDMMUtils
from semantify.utils.models_factory import ModelsFactory
from semantify.utils.paths_utils import get_model_abs_path
from semantify.utils.general import get_model_to_eval
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate
from MICA.MICA import MICA, process_single, to_batch, deterministic
#Make Conversation Using Prompts
class ChatGPTConversation:
def __init__(self):
config = configparser.ConfigParser()
config.read("./keys.ini")
openai_api_key = config['OPENAI']['OPENAI_API_KEY']
os.environ.update({'OPENAI_API_KEY': openai_api_key})
llm = OpenAI(temperature=0.9)
template = """I have 4 basis of human expression : "smiling", "serious", "angry", "disgusted" this bases continously scaled between -1 and 1. For example with "smiling" basis with value of 0.7 and others with 0, it expresses smiling face. In opposite, with "smiling" basis with value of -1 and others with 0, it expresses strong sad face. Additionally combing multiple bases can express more diverse expressions.
From now, you should answer predicted values of expressions you might feel for every user input. Answer in format smiling : value, serious : value, angry : value, disgusted : value
Do not include any other text to your answer.
Previous values:
{history}
User: {input}
Predicted Values:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
self.conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
memory=ConversationBufferMemory()
)
def __call__(self, prompt):
self.conversation.memory.clear()
text = self.conversation.predict(input=prompt)
return text
class ReactToConversation:
def __init__(self):
#####SEMANTIFY CONFIGS#####
mapper_path = get_model_abs_path("flame", "expression")
num_coeffs = 10
self.model, self.labels = get_model_to_eval(mapper_path)
self.threedmm_utils = ThreeDMMUtils()
self.ignore_random_jaw = True
self.device = torch.device("cuda")
self.num_sentiments = 4
self.model_kwargs = ModelsFactory("flame").get_default_params(with_face=True, num_coeffs=num_coeffs)
self.model_kwargs["num_coeffs"] = num_coeffs
self.model_kwargs["gender"] = "neutral"
self.llm = ChatGPTConversation()
self.shape_params = torch.zeros(300).unsqueeze(0)
#####MICA CONFIGS#####
pretrained='MICA/data/pretrained/mica.tar'
self.mica, self.faces, self.app = MICA(pretrained, self.device)
#####TCP(Local Python) CONNECTION#####
port = 22222
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind(('0.0.0.0', port))
print("waiting for local server to send image")
self.s.listen()
self.client_socket, addr = self.s.accept()
        #####TCP(Unity) CONNECTION#####
        shutil.rmtree('./logs', ignore_errors=True)  # clear previous logs; ignore if the directory does not exist yet
        os.makedirs('./logs')
#Given input text make corresponding expression parameters
def react_to(self, input_text, count=1):
shape_params = self.shape_params
prd = self.llm(input_text)
pairs = prd.strip().split(',') #get values from gpt answer
emotions = []
for pair in pairs:
_, value = pair.split(':')
emotions.append((float(value.strip())*20) + 20)
assert(self.num_sentiments == len(emotions))
values = torch.tensor(emotions, dtype=torch.float32)[None]
out = self.model(values.to(self.device)).cpu() #Predict real expression parameters
expression_params = out
np.save(f'./logs/{count}_expression_params.npy', out.detach().to('cpu').numpy())
verts, faces = self.threedmm_utils.get_flame_model(expression_params=expression_params, shape_params=shape_params)
# Save it to obj for test
# os.makedirs("objs", exist_ok=True)
# input_text = input_text.strip().replace(" ", "_")
# obj_path = f"objs/{count}_{input_text}.obj"
# save_obj(
# f=obj_path,
# verts=torch.tensor(verts).squeeze(),
# faces=torch.tensor(faces).squeeze()
# )
# print(f'saved at {obj_path}')
return verts, faces
    #Get the captured image from the local machine
def from_local(self):
with self.client_socket:
pickle_data = b""
while True:
data = self.client_socket.recv(4096)
if data[-len(b"END"):] == b"END":
pickle_data += data[:-len(b"END")]
print("END MET")
break
pickle_data += data
received_image = pickle.loads(pickle_data)
assert(received_image.shape[2] == 3)
return received_image
# MICA
def convert_to_flame(self):
input_img = self.from_local()
mica = self.mica
app = self.app
arcface='MICA/demo/arcface'
deterministic(42)
with torch.no_grad():
print("MICA started...")
path = process_single(arcface, app, input_img, draw_bbox=False)
images, arcface = to_batch(path)
codedict = mica.encode(images, arcface)
opdict = mica.decode(codedict)
code = opdict['pred_shape_code']
return code[0]
def __call__(self):
self.shape_params = self.convert_to_flame().unsqueeze(0)
np.save("./logs/0_shape_params.npy", self.shape_params.to('cpu').numpy())
count = 1
while True:
input_text = input("Say something to you : ")
vert, faces = self.react_to(input_text, count)
# self.to_local(vert, faces)
count += 1
if (input_text=='q'):
break
self.s.close()
if __name__ == "__main__":
test = ReactToConversation()
test() | [
"I have 4 basis of human expression : \"smiling\", \"serious\", \"angry\", \"disgusted\" this bases continously scaled between -1 and 1. For example with \"smiling\" basis with value of 0.7 and others with 0, it expresses smiling face. In opposite, with \"smiling\" basis with value of -1 and others with 0, it expresses strong sad face. Additionally combing multiple bases can express more diverse expressions. \n\nFrom now, you should answer predicted values of expressions you might feel for every user input. Answer in format smiling : value, serious : value, angry : value, disgusted : value\nDo not include any other text to your answer.\n\nPrevious values:\n{history}\nUser: {input}\nPredicted Values:",
"input"
] |
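ReactToConversation.from_local above expects a single pickled numpy image followed by a literal b"END" marker on the TCP socket bound to port 22222. A minimal sketch of the sending side implied by that protocol is shown below; the server address and the use of OpenCV as the capture backend are assumptions and not part of the repository.

import pickle
import socket

import cv2  # assumed capture backend; any source of an HxWx3 numpy array works

SERVER_ADDRESS = ("127.0.0.1", 22222)  # assumed host; the port matches the listener above

# Grab one frame from the default camera as an HxWx3 BGR array
capture = cv2.VideoCapture(0)
ok, frame = capture.read()
capture.release()
if not ok:
    raise RuntimeError("could not read a frame from the camera")

# Serialize the array and append the END sentinel the receiver looks for
payload = pickle.dumps(frame) + b"END"

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(SERVER_ADDRESS)
    client.sendall(payload)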
2024-01-10 | ddehun/coling2022_reweighting_sts | modeling.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the core classes used by DINO.
"""
import math
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any, Union
import openai
import torch
from tqdm import tqdm
from transformers import GPT2Tokenizer, PreTrainedTokenizer, PreTrainedModel
from generation import SelfDebiasingGPT2LMHeadModel
from utils import DatasetEntry
PLACEHOLDER_STR = "<X1>"
class DinoGenerator:
"""
This class represents a generative language model which can be used to generate datasets from instructions.
"""
def __init__(
self,
task_spec: Dict[str, Any],
model: Union["str", "ModelWrapper"] = None,
openai_api_key: Optional[str] = None,
max_output_length: int = 40,
decay_constant: float = 100,
top_p: float = 0.9,
top_k: int = 5,
remove_duplicates: bool = True,
remove_identical_pairs: bool = False,
min_num_words: int = -1,
min_num_tokens: int = -1,
keep_outputs_without_eos: bool = False,
allow_newlines_in_outputs: bool = False,
):
"""
:param task_spec: the task specification
:param model: a wrapper around the underlying language model.
If GPT-3 is used, this should instead be the name of the GPT-3 model (e.g., "davinci")
:param openai_api_key: an optional API key for GPT-3. If given, GPT-3 is used as a language model
:param max_output_length: the maximum output length for each generated text
:param decay_constant: the decay constant for self-debiasing
:param top_p: p value for top-p sampling (set to 0 to perform no top-p sampling)
:param top_k: k value for top-k sampling (set to 0 to perform no top-k sampling)
:param remove_duplicates: whether duplicates should be removed from the generated dataset
:param remove_identical_pairs: whether text pairs with identical texts should be removed (only for text pair datasets)
:param min_num_words: the minimum number of (whitespace-separated) words for each dataset entry
:param min_num_tokens: the minimum number of tokens for each dataset entry
:param keep_outputs_without_eos: if set to true, examples where the language model does not output a quotation mark (which is
interpreted as a signal that it has completed its output) are not removed from the dataset.
:param allow_newlines_in_outputs: if set to true, model outputs that contain a newline character before the end-of-sequence token
(a quotation mark) are not removed from the dataset
"""
self.model = model
self.openai_api_key = openai_api_key
self.max_output_length = max_output_length
self.decay_constant = decay_constant
self.top_p = top_p
self.top_k = top_k
self.remove_duplicates = remove_duplicates
self.remove_identical_pairs = remove_identical_pairs
self.min_num_words = min_num_words
self.min_num_tokens = min_num_tokens
self.keep_outputs_without_eos = keep_outputs_without_eos
self.allow_newlines_in_outputs = allow_newlines_in_outputs
self.labels = list(task_spec["labels"].keys())
self.instructions = {
label: task_spec["labels"][label]["instruction"]
for label in self.labels
}
self.counter_labels = {
label: task_spec["labels"][label].get("counter_labels", [])
for label in self.labels
}
def generate_dataset(
self,
input_texts: Optional[List[str]],
num_entries_per_input_and_label: Optional[int] = None,
num_entries_per_label: Optional[int] = None,
batch_size: Optional[int] = None,
) -> List[DatasetEntry]:
"""
Generate a new dataset.
:param input_texts: an optional list of raw texts; this is required for generating text pair datasets
:param num_entries_per_input_and_label: the number of entries to generate for each pair of input text and label
:param num_entries_per_label: the number of entries to generate for each label
:param batch_size: the number of entries to generate simultaneously
:return: the generated dataset
"""
generate_with_inputs = input_texts is not None
if not generate_with_inputs:
input_texts = list(
range(math.ceil(num_entries_per_label / batch_size))
)
num_entries_per_input_and_label = batch_size
input_iterator = tqdm(input_texts, desc="Dataset Entries")
dataset = []
for idx, input_text_or_id in enumerate(input_iterator):
for label in self.labels:
dataset += self._generate_dataset_entries(
input_text_or_id,
label=label,
num_entries=num_entries_per_input_and_label,
generate_with_inputs=generate_with_inputs,
)
dataset = self._postprocess_dataset(dataset, generate_with_inputs)
return dataset
def _generate_dataset_entries(
self,
input_text_or_id: Union[str, int],
label: str,
num_entries: int,
generate_with_inputs: bool,
) -> List[DatasetEntry]:
instruction = self._build_instruction(
label, input_text_or_id, generate_with_inputs
)
if self.openai_api_key is not None:
try:
model_responses = [
openai.Completion.create(
engine=self.model,
prompt=instruction,
max_tokens=self.max_output_length,
top_p=self.top_p,
stop=['"'],
)
for _ in range(num_entries)
]
model_outputs = [
model_response["choices"][0]["text"]
for model_response in model_responses
]
except openai.error.RateLimitError as e:
print(e)
return []
else:
counter_instructions = [
self._build_instruction(
other_label, input_text_or_id, generate_with_inputs
)
for other_label in self.counter_labels[label]
]
model_outputs = self.model.generate_self_debiasing(
input_text=instruction,
debiasing_texts=counter_instructions,
num_samples=num_entries,
decay_constant=self.decay_constant,
do_sample=True,
min_length=self.max_output_length,
max_length=self.max_output_length,
top_k=self.top_k,
top_p=self.top_p,
)
model_outputs = [
self._process_output(
input_text=input_text_or_id,
output_text=output,
label=label,
generate_with_inputs=generate_with_inputs,
)
for output in model_outputs
]
model_outputs = [
output for output in model_outputs if output is not None
]
return model_outputs
def _build_instruction(
self, label: str, text: str, generate_with_inputs: bool
) -> str:
instruction_template = self.instructions[label]
if generate_with_inputs:
assert (
instruction_template.count(PLACEHOLDER_STR) == 1
), f"An input text was provided, but the instruction for label '{label}' does not contain exactly one placeholder"
return instruction_template.replace(PLACEHOLDER_STR, text)
else:
assert (
instruction_template.count(PLACEHOLDER_STR) == 0
), f"No input text was provided, but the instruction for label '{label}' contains a placeholder"
return instruction_template
def _process_output(
self,
input_text: Union[str, int],
output_text: str,
label: str,
generate_with_inputs: bool,
) -> Optional[DatasetEntry]:
output_text = (
output_text.split('"')[0]
if '"' in output_text
else (output_text if self.keep_outputs_without_eos else None)
)
if output_text and (
"\n" not in output_text or self.allow_newlines_in_outputs
):
text_a = input_text if generate_with_inputs else output_text
text_b = output_text if generate_with_inputs else None
return DatasetEntry(text_a=text_a, text_b=text_b, label=label)
return None
def _postprocess_dataset(
self, dataset: List[DatasetEntry], generate_with_inputs: bool
) -> List[DatasetEntry]:
if self.remove_duplicates:
dataset = list(set(dataset))
if self.min_num_words > 0:
if generate_with_inputs:
dataset = [
entry
for entry in dataset
if len(entry.text_b.split()) >= self.min_num_words
]
else:
dataset = [
entry
for entry in dataset
if len(entry.text_a.split()) >= self.min_num_words
]
if self.min_num_tokens > 0:
if generate_with_inputs:
dataset = [
entry
for entry in dataset
if len(self.model._tokenizer.tokenize(entry.text_b))
>= self.min_num_tokens
]
else:
dataset = [
entry
for entry in dataset
if len(self.model._tokenizer.tokenize(entry.text_a))
>= self.min_num_tokens
]
if generate_with_inputs and self.remove_identical_pairs:
dataset = [
entry for entry in dataset if entry.text_a != entry.text_b
]
return dataset
class ModelWrapper(ABC):
"""
This class represents a wrapper for a pretrained language model that provides high-level functions for the generation of texts with
the self-debiasing method described in https://arxiv.org/abs/2103.00453.
"""
def __init__(self, use_cuda: bool = True):
"""
:param use_cuda: whether to use CUDA
"""
self._device = (
"cuda" if torch.cuda.is_available() and use_cuda else "cpu"
)
self._tokenizer = None # type: Optional[PreTrainedTokenizer]
self._model = None # type: Optional[PreTrainedModel]
def query_model(self, input_text: str) -> torch.FloatTensor:
"""For a given input text, returns the probability distribution over possible next tokens."""
return self.query_model_batch([input_text])[0]
@abstractmethod
def query_model_batch(self, input_texts: List[str]) -> torch.FloatTensor:
"""For a batch of input texts, returns the probability distribution over possible next tokens."""
pass
@abstractmethod
def generate(self, input_text: str, **kwargs) -> str:
"""Generates a continuation for a given input text."""
pass
@abstractmethod
def generate_self_debiasing(
self,
input_text: str,
debiasing_texts: List[str],
num_samples: int = 1,
decay_constant: float = 100,
epsilon: float = 0.01,
debug: bool = False,
**kwargs,
) -> List[str]:
"""
Generates continuations for the given input texts with self-debiasing.
        :param input_text: the input text to generate continuations for
        :param debiasing_texts: the debiasing texts to be used
:param decay_constant: the decay constant (lambda in the paper)
:param epsilon: the minimum factor by which each probability is multiplied
:param debug: whether to print additional debugging output
:param kwargs: further arguments are passed on to the original generate function
:return: the list of generated continuations
"""
pass
class GPT2Wrapper(ModelWrapper):
def __init__(self, model_name: str = "gpt2-xl", use_cuda: bool = True):
"""
:param model_name: the name of the pretrained GPT2 model (default: "gpt2-xl")
:param use_cuda: whether to use CUDA
"""
super().__init__(use_cuda=use_cuda)
self._tokenizer = GPT2Tokenizer.from_pretrained(model_name)
self._model = SelfDebiasingGPT2LMHeadModel.from_pretrained(
model_name
) # type: SelfDebiasingGPT2LMHeadModel
if use_cuda:
self._model.parallelize()
self._tokenizer.pad_token = self._tokenizer.eos_token
self._model.config.pad_token_id = self._tokenizer.eos_token_id
def query_model_batch(self, input_texts: List[str]):
inputs = self._tokenizer.batch_encode_plus(
input_texts, padding=True, max_length=512, return_tensors="pt"
)
inputs = {key: val.to(self._device) for key, val in inputs.items()}
output_indices = inputs["attention_mask"].sum(dim=1) - 1
output = self._model(**inputs)["logits"]
return torch.stack(
[
output[example_idx, last_word_idx, :]
for example_idx, last_word_idx in enumerate(output_indices)
]
)
def generate(self, input_text: str, **kwargs):
input_ids = self._tokenizer.encode(
input_text, return_tensors="pt"
).to(self._device)
output_ids = self._model.generate(input_ids, **kwargs)[0]
return self._tokenizer.decode(output_ids)
def generate_self_debiasing(
self,
input_text: str,
debiasing_texts: List[str],
num_samples: int = 1,
decay_constant: float = 100,
epsilon: float = 0.01,
debug: bool = False,
min_length: int = None,
max_length: int = None,
**kwargs,
) -> List[str]:
self._model.init_logits_processor(
num_debiasing_prefixes=len(debiasing_texts),
decay_constant=decay_constant,
epsilon=epsilon,
debug=debug,
tokenizer=self._tokenizer,
)
inputs = [input_text] * num_samples
for debiasing_text in debiasing_texts:
inputs += [debiasing_text] * num_samples
inputs = self._tokenizer.batch_encode_plus(
inputs, padding=True, return_tensors="pt"
)
inputs["attention_mask"] = torch.flip(
inputs["attention_mask"], dims=[1]
)
shifts = inputs["attention_mask"].shape[-1] - inputs[
"attention_mask"
].sum(dim=-1)
for batch_idx in range(inputs["input_ids"].shape[0]):
inputs["input_ids"][batch_idx] = inputs["input_ids"][
batch_idx
].roll(shifts[batch_idx].item())
inputs = {k: v.to(self._device) for k, v in inputs.items()}
input_length = inputs["input_ids"].shape[1]
if min_length is not None:
min_length = min_length + input_length
if max_length is not None:
# max_length = min(self.model._model.config.max_position_embeddings, max_length + input_length)
max_length = max_length + input_length
output_ids = self._model.generate(
**inputs, min_length=min_length, max_length=max_length, **kwargs
)
batch_size = output_ids.shape[0] // (1 + len(debiasing_texts))
output_ids = output_ids[:batch_size, inputs["input_ids"].shape[1] :]
return self._tokenizer.batch_decode(output_ids)
| [] |
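The module above only defines the generator and the model wrappers; nothing in it shows the expected shape of a task specification. A minimal usage sketch under the interfaces defined above is given below: the label names, the instruction strings and the small GPT-2 checkpoint are illustrative assumptions, chosen only to keep the example short, and the import assumes the file is loadable as `modeling` from the repository root.

# Minimal sketch: generate a tiny text-pair dataset with the classes above.
from modeling import DinoGenerator, GPT2Wrapper  # assumption: run from the repository root

task_spec = {
    "labels": {
        "2": {
            "instruction": 'Task: Write two sentences that mean the same thing.\nSentence 1: "<X1>"\nSentence 2: "',
            "counter_labels": ["0"],
        },
        "0": {
            "instruction": 'Task: Write two sentences that are on completely different topics.\nSentence 1: "<X1>"\nSentence 2: "',
            "counter_labels": [],
        },
    }
}

model = GPT2Wrapper(model_name="gpt2", use_cuda=False)  # small checkpoint only for illustration
generator = DinoGenerator(task_spec=task_spec, model=model, max_output_length=30)

dataset = generator.generate_dataset(
    input_texts=["A man is playing a guitar on stage."],
    num_entries_per_input_and_label=2,
)
for entry in dataset:
    print(entry.label, "|", entry.text_a, "|", entry.text_b)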
2024-01-10 | aitorllj93/the-artificial-agent | src~core~interpreters~langchain~ChatAgent.py | """Chat agent with question answering
"""
import os
import re
import logging
# from utils.giphy import GiphyAPIWrapper
from dataclasses import dataclass
from langchain.chains import LLMChain, LLMRequestsChain
from langchain import Wikipedia, OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents.react.base import DocstoreExplorer
from langchain.agents import Tool, AgentExecutor, load_tools, initialize_agent, get_all_tool_names
from langchain.prompts import PromptTemplate
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents.conversational.base import ConversationalAgent
from datetime import datetime
from core.registry.personalities import get_personality, default_personality
from core.config import get_value
import langchain
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()
logger = logging.getLogger(__name__)
# news_api_key = os.environ["NEWS_API_KEY"]
# tmdb_bearer_token = os.environ["TMDB_API_KEY"]
@dataclass
class ChatAgent:
agent_executor: AgentExecutor = None
memory: ConversationBufferMemory = None
personality_name: str = None
def _get_docstore_agent(self):
docstore = DocstoreExplorer(Wikipedia())
docstore_tools = [
Tool(
name="Search",
func=docstore.search,
description="Search wikipedia"
),
Tool(
name="Lookup",
func=docstore.lookup,
description="Lookup a wikipedia page"
)
]
docstore_llm = OpenAI(temperature=0, openai_api_key=get_value(
'providers.openai.apiKey'))
docstore_agent = initialize_agent(
docstore_tools, docstore_llm, agent="react-docstore", verbose=True)
return docstore_agent
def _get_requests_llm_tool(self):
template = """
Extracted: {requests_result}"""
PROMPT = PromptTemplate(
input_variables=["requests_result"],
template=template,
)
def lambda_func(input):
out = chain = LLMRequestsChain(llm_chain=LLMChain(
llm=OpenAI(temperature=0, openai_api_key=get_value(
'providers.openai.apiKey')),
prompt=PROMPT)).run(input)
return out.strip()
return lambda_func
def __init__(self, *, conversation_chain: LLMChain = None, history_array, personality_name: str = None):
self.personality_name = personality_name
date = datetime.today().strftime('%B %d, %Y at %I:%M %p')
# set up a Wikipedia docstore agent
docstore_agent = self._get_docstore_agent()
# giphy = GiphyAPIWrapper()
# tool_names = get_all_tool_names()
print(get_all_tool_names())
tool_names = [
# 'serpapi',
# 'wolfram-alpha',
'llm-math',
'open-meteo-api',
'news-api',
# 'tmdb-api',
'wikipedia'
]
requests_tool = self._get_requests_llm_tool()
tools = load_tools(tool_names,
llm=OpenAI(temperature=0, openai_api_key=get_value(
'providers.openai.apiKey')),
news_api_key=get_value('providers.news.apiKey')
)
# news_api_key=news_api_key,
# tmdb_bearer_token=tmdb_bearer_token)
# Tweak some of the tool descriptions
for tool in tools:
if tool.name == "Search":
tool.description = "Use this tool exclusively for questions relating to current events, or when you can't find an answer using any of the other tools."
if tool.name == "Calculator":
tool.description = "Use this to solve numeric math questions and do arithmetic. Don't use it for general or abstract math questions."
tools = tools + [
Tool(
name="WikipediaSearch",
description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.",
func=docstore_agent.run
),
# Tool(
# name="GiphySearch",
# func=giphy.run,
# return_direct=True,
# description="useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer."
# ),
Tool(
name="Requests",
func=requests_tool,
description="A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."
)
]
ai_prefix = personality_name or default_personality
human_prefix = get_value('common.user.name', "Human")
suffix = f"""
The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date.
Questions that refer to a specific date or time period will be interpreted relative to this date.
After you answer the question, you MUST to determine which langauge your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US).
Begin!
Previous conversation history:
{{chat_history}}
New input: {{input}}
{{agent_scratchpad}}
"""
self.memory = ConversationBufferMemory(memory_key="chat_history")
for item in history_array:
self.memory.save_context(
{f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]})
llm = OpenAI(temperature=.5, openai_api_key=get_value(
'providers.openai.apiKey'))
llm_chain = LLMChain(
llm=llm,
prompt=ConversationalAgent.create_prompt(
tools,
# prefix=prefix,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
suffix=suffix
),
)
agent_obj = ConversationalAgent(
llm_chain=llm_chain, ai_prefix=ai_prefix)
self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
verbose=True,
max_iterations=5,
memory=self.memory,)
# self.agent_executor = AgentExecutor.from_agent_and_tools(
# agent=agent,
# tools=tools,
# max_iterations=5,
# verbose=True)
def express(self, input: str, lang: str = "en-US"):
personality_llm = ChatOpenAI(temperature=0, openai_api_key=get_value(
'providers.openai.apiKey'))
personality = get_personality(self.personality_name)
personality_prompt = PromptTemplate(
input_variables=["original_words"],
partial_variables={
"lang": lang
},
template=personality["name"] +
" is a " + personality["prompt"] +
" Restate the following as " +
personality["name"] +
" would in {lang}: \n{original_words}\n",
)
self.express_chain = LLMChain(
llm=personality_llm, prompt=personality_prompt, verbose=True, memory=self.memory)
return self.express_chain.run(input)
def run(self, input):
try:
result = self.agent_executor.run(input)
pattern = r'\(([a-z]{2}-[A-Z]{2})\)'
# Search for the local pattern in the string
match = re.search(pattern, result)
            language = 'en-US'  # default
if match:
# Get the language code
language = match.group(1)
# Remove the language code from the reply
result = re.sub(pattern, '', result)
logger.info('Got result from agent: ' + result)
# TODO: this method is not optimum, but it works for now
reply = self.express(result, language)
# reply = self.express_chain.run(result)
logger.info('Answer from express chain: ' + reply)
reply = reply.replace('"', '')
except ValueError as inst:
print("ValueError: \n\n")
print(inst)
reply = "I don't understand what you're saying. Please try again."
except Exception as e:
print(e)
logger.exception(e)
reply = "I'm sorry, I'm having trouble understanding you. Please try again."
return reply
| [
"\n Extracted: {requests_result}",
"name",
"original_words",
" Restate the following as ",
" would in {lang}: \n{original_words}\n",
"requests_result",
" is a ",
"lang"
] |
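A minimal sketch of how ChatAgent could be driven from elsewhere in the project is shown below. The history entries follow the {"prompt", "response"} keys consumed in __init__; the import path, the example messages and running it from inside the project (so core.config and core.registry resolve) are assumptions.

# Minimal sketch: instantiate the agent with a short prior history and ask one question.
from core.interpreters.langchain.ChatAgent import ChatAgent  # assumed import path, with src/ on sys.path

history = [
    {"prompt": "Hello, who are you?", "response": "Hi! I'm your assistant."},
]

# personality_name=None is an assumption; a registered personality name could be passed instead
agent = ChatAgent(history_array=history, personality_name=None)
reply = agent.run("What is the weather like in Madrid today?")
print(reply)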
2024-01-10 | aitorllj93/the-artificial-agent | src~notes~commands~list_tasks~handle.py |
from core.adapters import openai, telegram
from chat.commands.enumerate import prompt
from notes.get_notes import get_today_daily_note
async def handle(
params: dict,
command: dict,
update: telegram.Update,
context: telegram.ContextTypes.DEFAULT_TYPE
):
note = await get_today_daily_note()
tasks = note.list_tasks_from_section(
"Assistant", params['completed'] or False)
tasksStr = ''
for index, task in enumerate(tasks):
tasksStr += f'{index + 1}. {task.text}\n'
prompt_text = prompt(
tasksStr,
command['personality']
)
message = await openai.generate_text_from_prompt(prompt_text)
await telegram.send_text_message(message, update, context)
| [
"personality"
] |
2024-01-10 | Samoppakiks/autofiles | loader.py | import os
import pdf2image
from PIL import Image
import pytesseract
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import UnstructuredPDFLoader
def convert_pdf_to_images(pdf_path):
return pdf2image.convert_from_path(pdf_path)
def save_images(pages):
image_counter = 1
for page in pages:
filename = f"page_{image_counter}.jpg"
page.save(filename, "JPEG")
image_counter += 1
return image_counter
def extract_text_from_images(image_counter):
text = ""
for i in range(1, image_counter):
image_file = f"page_{i}.jpg"
ocr_text = pytesseract.image_to_string(Image.open(image_file), lang="eng+hin")
text += ocr_text + " "
os.remove(image_file)
return text
def extract_ocr(pdf_path):
pages = convert_pdf_to_images(pdf_path)
image_counter = save_images(pages)
text = extract_text_from_images(image_counter)
txt_file_path = (
f"./extracted_texts/{os.path.splitext(os.path.basename(pdf_path))[0]}.txt"
)
with open(txt_file_path, "w") as file:
file.write(text)
return text
"""def extract_docx(docx_path):
loader = Docx2txtLoader(docx_path)
docs = loader.load()
    text = [doc.page_content for doc in docs]
text_file_path = (
f"./extracted_texts/{os.path.splitext(os.path.basename(docx_path))[0]}.txt"
)
with open(text_file_path, "w") as file:
file.write(" ".join(text))
return docs"""
docx_path = "/Users/saumya/Documents/Government/files/Producer Enterprise/Ujjala/Draft 6th PMC Minutes - Dairy Value Chain.docx"
text = extract_ocr(docx_path)
print(text)
| [] |
2024-01-10 | Samoppakiks/autofiles | keypoints.py | import openai
from config import openaiapi
import main
import os
openai.api_key = openaiapi
def get_points(text):
    """Returns a bulleted executive summary of the given text."""
extraction = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "system",
"content": "You are a helpful assistant working for the government, who reads the text given from a government file, and then creates an executive summary of the subject matter in the form of a bulleted list",
},
            {
                "role": "user",
                "content": f"Please give an executive summary of the following text, making sure not to miss any major event or relevant information that may be useful for understanding the subject matter. Here is the text: '{text}'",
},
],
)
return extraction.choices[0].message["content"].strip()
def keypoints(chunks):
"""Returns a list of keypoints from a list of chunks."""
points = []
for chunk in chunks:
print(f"Getting keypoints for chunk: {chunk}")
points.append(get_points(chunk))
print(f"keypoints: {points}")
return points
def kp(file_path):
chunks = main.get_chunks(file_path)
kp = keypoints(chunks)
new_file_path = (
f"./keypoints/{os.path.splitext(os.path.basename(file_path))[0]}.txt"
)
with open(new_file_path, "w") as file:
file.write("\n".join(kp))
return kp
file_path = "extracted_texts/Draft 6th PMC Minutes - Dairy Value Chain.txt"
kps = kp(file_path)
print(kps)
| [
    "Please give an executive summary of the following text, making sure not to miss any major event or relevant information that may be useful for understanding the subject matter. Here is the text: 'PLACEHOLDER'",
"You are a helpful assistant working for the government, who reads the text given from a government file, and then creates an executive summary of the subject matter in the form of a bulleted list"
] |
2024-01-10 | catalystneuro/ontology-matching-service | scripts~embed_ontology.py | import os
import pickle
from pathlib import Path
from copy import deepcopy
import numpy as np
import tiktoken
from langchain.embeddings.openai import OpenAIEmbeddings
import obonet
from tqdm import tqdm
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from qdrant_client.http import models
def extract_string_within_quotes(term):
start = term.find('"') + 1
end = term.find('"', start)
if start != -1 and end != -1: # Check if quotes are found
term = term[start:end]
return term
def extract_string_within_backslashes(term):
start = term.find("\\") + 1
end = term.find("\\", start)
if start != -1 and end != -1: # Check if quotes are found
term = term[start:end]
return term
def create_synonym_string(node_data):
    # Join all synonym strings into one space-separated string; empty if the term has none.
    synonym_string = ""
    if 'synonym' in node_data:
        synonym_list = []
        for synonym in node_data['synonym']:
            term = extract_string_within_quotes(synonym)
            synonym_list.append(term)
        synonym_string = " ".join(synonym_list)
    return synonym_string.strip()
def clean_definition_string(node_data):
definition = ""
if "def" in node_data:
definition = node_data["def"]
definition = extract_string_within_backslashes(definition)
definition = extract_string_within_quotes(definition)
clean_definition = definition.replace('"', "").strip()
return clean_definition
def get_info_dict_for_term(node_id, node_data):
synonym_string = create_synonym_string(node_data)
definition = clean_definition_string(node_data)
direct_parents = node_data.get("is_a", [])
info_dict = dict(id=node_id, name=node_data["name"], definition=definition, synonyms=synonym_string, direct_parents=direct_parents)
return info_dict
def build_text_to_embed(node_info):
name = node_info["name"]
definition = node_info["definition"]
synonyms = node_info["synonyms"]
text_to_embed = name + " " + definition + " " + synonyms
node_info["text_to_embed"] = text_to_embed
return node_info
def build_parents_graph(node_id, id_to_info):
node_info = deepcopy(id_to_info[node_id])
direct_parents = node_info["direct_parents"]
parents_graph = []
for parent in direct_parents:
if "NBO" not in parent: # This eliminates reference nodes from other ontologies
continue
parent_info = build_parents_graph(parent, id_to_info) # Recursive call
parent_info.pop("synonyms", None)
parent_info.pop("direct_parents", None)
parent_info.pop("text_to_embed", None)
parents_graph.append(parent_info)
node_info["parent_structure"] = parents_graph
return node_info
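# Sketch of the fully assembled record for one term after the loops below run
# (field values are illustrative):
#   {"id": "NBO:0000123", "name": "...", "definition": "...", "synonyms": "...",
#    "direct_parents": ["NBO:0000100"], "text_to_embed": "...",
#    "parent_structure": [{"id": "NBO:0000100", "name": "...", "definition": "...",
#                          "parent_structure": [...]}]}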
url = 'http://purl.obolibrary.org/obo/nbo.obo'
graph = obonet.read_obo(url)
nodes_in_nbo = [node for node in graph.nodes if "NBO" in node] # This eliminates reference nodes from other ontologies
id_to_info = dict()
for node_id in nodes_in_nbo:
node_data = graph.nodes[node_id]
node_info = get_info_dict_for_term(node_id, node_data)
node_info = build_text_to_embed(node_info)
id_to_info[node_id] = node_info
for node_id, node_info in id_to_info.items():
node_info = build_parents_graph(node_id, id_to_info)
id_to_info[node_id] = node_info
# Calculate the price of embedding
# embedding model parameters
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
encoding = tiktoken.get_encoding(embedding_encoding)
text_to_embed = [node_info["text_to_embed"] for node_info in id_to_info.values()]
length_of_encoding_per_node = [len(encoding.encode(text)) for text in text_to_embed]
total_tokens = sum(length_of_encoding_per_node)
dollars_per_token = 0.0001 / 1000 # Check the latest pricing to re-estimate this.
print(f"Total prize to embed {total_tokens * dollars_per_token: 2.4f} USD ")
file_path = Path('../data/nbo_embeddings.pickle')
overwrite = False
if overwrite:
# Remove file if it exists
if file_path.is_file():
os.remove(file_path)
if not file_path.is_file():
    print(f'creating embeddings in {file_path.stem}')
embedding_model = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])
documents = text_to_embed
embeddings = embedding_model.embed_documents(documents)
with open(file_path, 'wb') as f:
pickle.dump(embeddings, f)
else:
with open(file_path, 'rb') as f:
embeddings = pickle.load(f)
embeddings = np.array(embeddings)
num_vectors, vector_size = embeddings.shape
qdrant_url = "https://18ef891e-d231-4fdd-8f6d-8e2d91337c24.us-east4-0.gcp.cloud.qdrant.io"
api_key = os.environ["QDRANT_API_KEY"]
client = QdrantClient(
url=qdrant_url,
api_key=api_key,
)
collection_name = "neuro_behavior_ontology"
client.recreate_collection(
collection_name=collection_name,
vectors_config=VectorParams(size=vector_size, distance=Distance.COSINE, on_disk=True),
)
batch_size = 100
points = []
for index, node_info in enumerate(tqdm(id_to_info.values())):
# Create a point
node_id = node_info["id"]
id = int(node_id.split(":")[1])
vector = embeddings[index]
payload = node_info
point = models.PointStruct(
id=id,
vector=vector.tolist(),
payload=payload,
)
points.append(point)
# If we have reached the batch size, upload the points
if len(points) == batch_size:
operation_info = client.upsert(
collection_name=collection_name,
wait=True,
points=points
)
# Clear points list after upload
points = []
# After all points are created, there might be some points that have not been uploaded yet
if points:
operation_info = client.upsert(
collection_name=collection_name,
wait=True,
points=points
) | [] |
2024-01-10 | catalystneuro/ontology-matching-service | src~ontology_matching_service~ontology_grounding.py | import requests
import warnings
import json
from pathlib import Path
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from qdrant_client import QdrantClient
from functools import lru_cache
@lru_cache(maxsize=None)
def get_qdrant_client():
qdrant_url = "https://18ef891e-d231-4fdd-8f6d-8e2d91337c24.us-east4-0.gcp.cloud.qdrant.io"
api_key = os.getenv("QDRANT_API_KEY", "")
return QdrantClient(url=qdrant_url, api_key=api_key)
def embed_text(text: str) -> list:
embedding_model = OpenAIEmbeddings(openai_api_key=os.environ["OPENAI_API_KEY"])
embedding = embedding_model.embed_documents([text])[0]
return embedding
def semantic_match(text, top=30, score_threshold=0.5, ontology="neuro_behavior_ontology"):
qdrant_client = get_qdrant_client()
query_vector = embed_text(text)
results = qdrant_client.search(
collection_name=ontology,
query_vector=query_vector,
limit=top,
with_payload=True,
score_threshold=score_threshold,
with_vectors=False,
)
return results
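# Usage sketch (illustrative query): each hit is a Qdrant ScoredPoint whose payload
# is the record stored at indexing time, e.g.
#   hits = semantic_match("one animal grooming another", top=10)
#   for hit in hits:
#       print(hit.score, hit.payload["id"], hit.payload["name"])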
def rerank_with_openai_from_ontologies_and_text(text, descriptions, ontology):
    ontology_properly_formatted = ontology.replace("_", " ").title()
    if "ontology" not in ontology_properly_formatted.lower():
        ontology_properly_formatted += " Ontology"
prompt = f"""
Here is a text description of a behavior:
{text}
    And here is the description of some entities from the {ontology_properly_formatted}:
{descriptions}
From those entities, return a reranked list with the 10 most relevant entities to the text description ordered from most relevant to least relevant.
If an element does not seem related enough, do not include it in the list.
The format of the output list should be in json style and include only the ids of the entities in the list, not the names or definitions.
"""
model = "gpt-3.5-turbo"
openai.api_key = os.getenv("OPENAI_API_KEY")
system_content = "You are a neuroscience researcher and you are interested in figuring out specific behaviors from a text description"
completion = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_content},
{"role": "user", "content": prompt},
],
temperature=0.0,
)
response = completion.choices[0].message.content
try:
json_dict = json.loads(response)
key = list(json_dict.keys())[0]
entities_found = json_dict[key]
except:
warnings.warn(
f"The response from OpenAI was not in the expected format. Here is the response:"
"\n {response} \n Returning an empty list."
)
entities_found = []
return entities_found
def build_description_sentence(result: dict):
payload = result.payload
name = payload["name"]
definition = payload.get("definition", "")
synonym = payload.get("synonym", "")
id = payload["id"]
description = f"id: {id}, name: {name}, definition: {definition}, synonyms: {synonym}"
return description
def naive_rerank_with_bm25(example_to_test, results):
from rank_bm25 import BM25Okapi
import nltk
nltk.download("punkt")
from nltk.tokenize import word_tokenize
terms_info = [(result.payload["id"], result.payload["name"]) for result in results]
terms = [result.payload["text_to_embed"] for result in results]
text = example_to_test["text_excerpt"]
# Tokenizing
tokenize = True
if tokenize:
tokenized_corpus = [word_tokenize(term) for term in terms]
tokenized_query = word_tokenize(text)
else:
tokenized_corpus = [term for term in terms]
tokenized_query = text
# BM25 model
bm25 = BM25Okapi(tokenized_corpus)
# Get scores for the query against each term in the corpus
scores = bm25.get_scores(tokenized_query)
# Zip together terms' information and scores, and sort
sorted_terms = sorted(zip(terms_info, scores), key=lambda x: x[1], reverse=True)
return sorted_terms
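# naive_rerank_with_bm25 returns ((text_id, name), score) tuples sorted by descending
# BM25 score; rerank_bm25 below unpacks term[0][0] to recover the ids.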
def rerank(results_list, text: str, ontology: str):
# Build descriptions of the behavior for prompts and call LLM to rerank
description_list = [build_description_sentence(result=result) for result in results_list]
entities_found = rerank_with_openai_from_ontologies_and_text(
text=text,
descriptions=description_list,
ontology=ontology,
)
results_dict = {result.payload["id"]: result for result in results_list}
seen_ids = set()
matching_result_list = []
# Remove duplicates
for id in entities_found:
if id not in seen_ids and id in results_dict:
matching_result_list.append(results_dict[id])
seen_ids.add(id)
return matching_result_list
def rerank_bm25(results_list, text: str):
    # Score the retrieved terms against the query text with BM25 (no LLM call here)
    sorted_terms = naive_rerank_with_bm25(example_to_test={"text_excerpt": text}, results=results_list)
    # BM25 returns ((id, name), score) tuples; match the ids back to the original results to get the full payloads
matching_result_list = []
for term in sorted_terms:
id = term[0][0]
matching_result = next((result for result in results_list if result.payload["id"] == id))
matching_result_list.append(matching_result)
return matching_result_list
| [
" \n Here is a text description of a behavior:\n PLACEHOLDER\n And here is the description of some entities from the PLACEHOLDER:\n PLACEHOLDER\n\n From those entities, return a reranked list with the 10 most relevant entities to the text description ordered from most relevant to least relevant.\n If an element does not seem related enough, do not include it in the list.\n The format of the output list should be in json style and include only the ids of the entities in the list, not the names or definitions.\n "
] |
2024-01-10 | LavanSumanan/Pumpkin | src~speech_to_text.py | from dotenv import load_dotenv
import os
import openai
# testing
from time import perf_counter
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def transcribe(filename):
audio_file = open(filename, "rb")
transcript = openai.Audio.translate(model="whisper-1", file=audio_file, response_format="text")
return transcript
def transcribe_multiprocessing(jobs, results):
while True:
job_input = jobs.get()
if not job_input:
print("ending processing")
break
(index, filename) = job_input
t_start = perf_counter()
audio_file = open(filename, "rb")
transcript = openai.Audio.translate(model="whisper-1", file=audio_file, response_format="text")
t_stop = perf_counter()
print("Time taken to transcribe (seconds): ", round(t_stop-t_start, 2))
results.put((index, transcript))
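# Driver sketch (hypothetical queue contents): the worker consumes (index, filename)
# jobs until it receives a falsy sentinel, and pushes (index, transcript) results, e.g.
#   jobs.put((0, "recordings/clip_0.wav"))
#   jobs.put(None)   # stops the worker loop
#   index, text = results.get()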
| [] |
2024-01-10 | MBlancoC/Brain_Check | Backend~vision_api.py | import os
from openai import OpenAI
def analyze_image_with_gpt4(image_data, question):
client = OpenAI(api_key="tu_clave_api_aquí")
# Codificar la pregunta a UTF-8
utf8_question = question.encode('utf-8').decode('utf-8')
response = client.completions.create(
model="gpt-4-vision-preview",
prompt=[
{
"role": "user",
"content": utf8_question
},
{
"role": "system",
"content": {"type": "image", "data": image_data}
}
],
max_tokens=300
)
return response
| [
"{'type': 'image', 'data': PLACEHOLDER}"
] |
2024-01-10 | YetAnotherJonWilson/danswer-for-solid | backend~danswer~chat~chat_prompts.py | from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chunking.models import InferenceChunk
from danswer.configs.constants import CODE_BLOCK_PAT
from danswer.db.models import ChatMessage
from danswer.llm.utils import translate_danswer_msg_to_langchain
DANSWER_TOOL_NAME = "Current Search"
DANSWER_TOOL_DESCRIPTION = (
"A search tool that can find information on any topic "
"including up to date and proprietary knowledge."
)
DANSWER_SYSTEM_MSG = (
"Given a conversation (between Human and Assistant) and a final message from Human, "
"rewrite the last message to be a standalone question that captures required/relevant context from the previous "
"conversation messages."
)
TOOL_TEMPLATE = """
TOOLS
------
You can use tools to look up information that may be helpful in answering the user's \
original question. The available tools are:
{tool_overviews}
RESPONSE FORMAT INSTRUCTIONS
----------------------------
When responding to me, please output a response in one of two formats:
**Option 1:**
Use this if you want to use a tool. Markdown code snippet formatted in the following schema:
```json
{{
"action": string, \\ The action to take. Must be one of {tool_names}
"action_input": string \\ The input to the action
}}
```
**Option #2:**
Use this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
TOOL_LESS_PROMPT = """
Respond with a markdown code snippet in the following schema:
```json
{{
"action": "Final Answer",
"action_input": string \\ You should put what you want to return to use here
}}
```
"""
USER_INPUT = """
USER'S INPUT
--------------------
Here is the user's input \
(remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{user_input}
"""
TOOL_FOLLOWUP = """
TOOL RESPONSE:
---------------------
{tool_output}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must \
mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES!
If the tool response is not useful, ignore it completely.
{optional_reminder}{hint}
IMPORTANT! You MUST respond with a markdown code snippet of a json blob with a single action, and NOTHING else.
"""
def form_user_prompt_text(
query: str,
tool_text: str | None,
hint_text: str | None,
user_input_prompt: str = USER_INPUT,
tool_less_prompt: str = TOOL_LESS_PROMPT,
) -> str:
user_prompt = tool_text or tool_less_prompt
user_prompt += user_input_prompt.format(user_input=query)
if hint_text:
if user_prompt[-1] != "\n":
user_prompt += "\n"
user_prompt += "\nHint: " + hint_text
return user_prompt.strip()
def form_tool_section_text(
tools: list[dict[str, str]], retrieval_enabled: bool, template: str = TOOL_TEMPLATE
) -> str | None:
if not tools and not retrieval_enabled:
return None
if retrieval_enabled:
tools.append(
{"name": DANSWER_TOOL_NAME, "description": DANSWER_TOOL_DESCRIPTION}
)
tools_intro = []
for tool in tools:
description_formatted = tool["description"].replace("\n", " ")
tools_intro.append(f"> {tool['name']}: {description_formatted}")
tools_intro_text = "\n".join(tools_intro)
tool_names_text = ", ".join([tool["name"] for tool in tools])
return template.format(
tool_overviews=tools_intro_text, tool_names=tool_names_text
).strip()
def format_danswer_chunks_for_chat(chunks: list[InferenceChunk]) -> str:
return "\n".join(
f"DOCUMENT {ind}:{CODE_BLOCK_PAT.format(chunk.content)}"
for ind, chunk in enumerate(chunks, start=1)
)
def form_tool_followup_text(
tool_output: str,
query: str,
hint_text: str | None,
tool_followup_prompt: str = TOOL_FOLLOWUP,
ignore_hint: bool = False,
) -> str:
# If multi-line query, it likely confuses the model more than helps
if "\n" not in query:
optional_reminder = f"\nAs a reminder, my query was: {query}\n"
else:
optional_reminder = ""
if not ignore_hint and hint_text:
hint_text_spaced = f"\nHint: {hint_text}\n"
else:
hint_text_spaced = ""
return tool_followup_prompt.format(
tool_output=tool_output,
optional_reminder=optional_reminder,
hint=hint_text_spaced,
).strip()
def build_combined_query(
query_message: ChatMessage,
history: list[ChatMessage],
) -> list[BaseMessage]:
user_query = query_message.message
combined_query_msgs: list[BaseMessage] = []
if not user_query:
raise ValueError("Can't rephrase/search an empty query")
combined_query_msgs.append(SystemMessage(content=DANSWER_SYSTEM_MSG))
combined_query_msgs.extend(
[translate_danswer_msg_to_langchain(msg) for msg in history]
)
combined_query_msgs.append(
HumanMessage(
content=(
"Help me rewrite this final query into a standalone question that takes into consideration the "
f"past messages of the conversation. You must ONLY return the rewritten query and nothing else."
f"\n\nQuery:\n{query_message.message}"
)
)
)
return combined_query_msgs
| [
"\n",
"past messages of the conversation. You must ONLY return the rewritten query and nothing else.",
"\nHint: PLACEHOLDER",
"Help me rewrite this final query into a standalone question that takes into consideration the ",
"\nRespond with a markdown code snippet in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n",
"\nTOOLS\n------\nYou can use tools to look up information that may be helpful in answering the user's original question. The available tools are:\n\n{tool_overviews}\n\nRESPONSE FORMAT INSTRUCTIONS\n----------------------------\nWhen responding to me, please output a response in one of two formats:\n\n**Option 1:**\nUse this if you want to use a tool. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": string, \\ The action to take. Must be one of {tool_names}\n \"action_input\": string \\ The input to the action\n}}\n```\n\n**Option #2:**\nUse this if you want to respond directly to the user. Markdown code snippet formatted in the following schema:\n\n```json\n{{\n \"action\": \"Final Answer\",\n \"action_input\": string \\ You should put what you want to return to use here\n}}\n```\n"
] |
2024-01-10 | YetAnotherJonWilson/danswer-for-solid | backend~danswer~direct_qa~qa_block.py | import abc
import json
import re
from collections.abc import Callable
from collections.abc import Iterator
from copy import copy
import tiktoken
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chunking.models import InferenceChunk
from danswer.configs.constants import CODE_BLOCK_PAT
from danswer.configs.constants import GENERAL_SEP_PAT
from danswer.configs.constants import QUESTION_PAT
from danswer.configs.constants import THOUGHT_PAT
from danswer.configs.constants import UNCERTAINTY_PAT
from danswer.direct_qa.interfaces import AnswerQuestionReturn
from danswer.direct_qa.interfaces import AnswerQuestionStreamReturn
from danswer.direct_qa.interfaces import DanswerAnswer
from danswer.direct_qa.interfaces import DanswerQuotes
from danswer.direct_qa.interfaces import QAModel
from danswer.direct_qa.models import LLMMetricsContainer
from danswer.direct_qa.qa_prompts import EMPTY_SAMPLE_JSON
from danswer.direct_qa.qa_prompts import JsonChatProcessor
from danswer.direct_qa.qa_prompts import WeakModelFreeformProcessor
from danswer.direct_qa.qa_utils import process_answer
from danswer.direct_qa.qa_utils import process_model_tokens
from danswer.llm.llm import LLM
from danswer.llm.utils import check_number_of_tokens
from danswer.llm.utils import dict_based_prompt_to_langchain_prompt
from danswer.llm.utils import get_default_llm_tokenizer
from danswer.llm.utils import str_prompt_to_langchain_prompt
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import clean_up_code_blocks
from danswer.utils.text_processing import escape_newlines
logger = setup_logger()
class QAHandler(abc.ABC):
"""Evolution of the `PromptProcessor` - handles both building the prompt and
processing the response. These are necessarily coupled, since the prompt determines
the response format (and thus how it should be parsed into an answer + quotes)."""
@abc.abstractmethod
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
raise NotImplementedError
@property
def is_json_output(self) -> bool:
"""Does the model expected to output a valid json"""
return True
def process_llm_output(
self, model_output: str, context_chunks: list[InferenceChunk]
) -> tuple[DanswerAnswer, DanswerQuotes]:
return process_answer(
model_output, context_chunks, is_json_prompt=self.is_json_output
)
def process_llm_token_stream(
self, tokens: Iterator[str], context_chunks: list[InferenceChunk]
) -> AnswerQuestionStreamReturn:
yield from process_model_tokens(
tokens=tokens,
context_docs=context_chunks,
is_json_prompt=self.is_json_output,
)
class JsonChatQAHandler(QAHandler):
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
return dict_based_prompt_to_langchain_prompt(
JsonChatProcessor.fill_prompt(
question=query, chunks=context_chunks, include_metadata=False
)
)
class SimpleChatQAHandler(QAHandler):
@property
def is_json_output(self) -> bool:
return False
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
return str_prompt_to_langchain_prompt(
WeakModelFreeformProcessor.fill_prompt(
question=query,
chunks=context_chunks,
include_metadata=False,
)
)
class SingleMessageQAHandler(QAHandler):
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
context_docs_str = "\n".join(
f"{CODE_BLOCK_PAT.format(c.content)}" for c in context_chunks
)
prompt: list[BaseMessage] = [
HumanMessage(
content="You are a question answering system that is constantly learning and improving. "
"You can process and comprehend vast amounts of text and utilize this knowledge "
"to provide accurate and detailed answers to diverse queries.\n"
"You ALWAYS responds with only a json containing an answer and quotes that support the answer.\n"
"Your responses are as INFORMATIVE and DETAILED as possible.\n"
f"{GENERAL_SEP_PAT}CONTEXT:\n\n{context_docs_str}"
f"{GENERAL_SEP_PAT}Sample response:"
f"{CODE_BLOCK_PAT.format(json.dumps(EMPTY_SAMPLE_JSON))}\n"
f"{QUESTION_PAT} {query}\n"
"Hint: Make the answer as DETAILED as possible and respond in JSON format!\n"
"Quotes MUST be EXACT substrings from provided documents!"
)
]
return prompt
class SingleMessageScratchpadHandler(QAHandler):
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
cot_block = (
f"{THOUGHT_PAT} Use this section as a scratchpad to reason through the answer.\n\n"
f"{json.dumps(EMPTY_SAMPLE_JSON)}"
)
context_docs_str = "\n".join(
f"{CODE_BLOCK_PAT.format(c.content)}" for c in context_chunks
)
prompt: list[BaseMessage] = [
HumanMessage(
content="You are a question answering system that is constantly learning and improving. "
"You can process and comprehend vast amounts of text and utilize this knowledge "
"to provide accurate and detailed answers to diverse queries.\n"
f"{GENERAL_SEP_PAT}CONTEXT:\n\n{context_docs_str}{GENERAL_SEP_PAT}"
f"You MUST respond in the following format:"
f"{CODE_BLOCK_PAT.format(cot_block)}\n"
f"{QUESTION_PAT} {query}\n"
"Hint: Make the answer as detailed as possible and use a JSON! "
"Quotes can ONLY be EXACT substrings from provided documents!"
)
]
return prompt
def process_llm_output(
self, model_output: str, context_chunks: list[InferenceChunk]
) -> tuple[DanswerAnswer, DanswerQuotes]:
logger.debug(model_output)
model_clean = clean_up_code_blocks(model_output)
match = re.search(r'{\s*"answer":', model_clean)
if not match:
return DanswerAnswer(answer=None), DanswerQuotes(quotes=[])
final_json = escape_newlines(model_clean[match.start() :])
return process_answer(
final_json, context_chunks, is_json_prompt=self.is_json_output
)
def process_llm_token_stream(
self, tokens: Iterator[str], context_chunks: list[InferenceChunk]
) -> AnswerQuestionStreamReturn:
raise ValueError(
"This Scratchpad approach is not suitable for real time uses like streaming"
)
class JsonChatQAUnshackledHandler(QAHandler):
def build_prompt(
self, query: str, context_chunks: list[InferenceChunk]
) -> list[BaseMessage]:
prompt: list[BaseMessage] = []
complete_answer_not_found_response = (
'{"answer": "' + UNCERTAINTY_PAT + '", "quotes": []}'
)
prompt.append(
SystemMessage(
content=(
"Use the following pieces of context to answer the users question. Your response "
"should be in JSON format and contain an answer and (optionally) quotes that help support the answer. "
"Your responses should be informative, detailed, and consider all possibilities and edge cases. "
f"If you don't know the answer, respond with '{complete_answer_not_found_response}'\n"
f"Sample response:\n\n{json.dumps(EMPTY_SAMPLE_JSON)}"
)
)
)
prompt.append(
SystemMessage(
content='Start by reading the following documents and responding with "Acknowledged".'
)
)
for chunk in context_chunks:
prompt.append(SystemMessage(content=chunk.content))
prompt.append(AIMessage(content="Acknowledged"))
prompt.append(HumanMessage(content=f"Question: {query}\n"))
return prompt
def _tiktoken_trim_chunks(
chunks: list[InferenceChunk], max_chunk_toks: int = 512
) -> list[InferenceChunk]:
"""Edit chunks that have too high token count. Generally due to parsing issues or
characters from another language that are 1 char = 1 token
Trimming by tokens leads to information loss but currently no better way of handling
NOTE: currently gpt-3.5 / gpt-4 tokenizer across all LLMs currently
TODO: make "chunk modification" its own step in the pipeline
"""
encoder = tiktoken.get_encoding("cl100k_base")
new_chunks = copy(chunks)
for ind, chunk in enumerate(new_chunks):
tokens = encoder.encode(chunk.content)
if len(tokens) > max_chunk_toks:
new_chunk = copy(chunk)
new_chunk.content = encoder.decode(tokens[:max_chunk_toks])
new_chunks[ind] = new_chunk
return new_chunks
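# e.g. a chunk of 1,500 tokens is cut down to its first 512 tokens (the default
# max_chunk_toks) before prompt building; chunks at or under the limit pass through unchanged.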
class QABlock(QAModel):
def __init__(self, llm: LLM, qa_handler: QAHandler) -> None:
self._llm = llm
self._qa_handler = qa_handler
def warm_up_model(self) -> None:
"""This is called during server start up to load the models into memory
in case the chosen LLM is not accessed via API"""
if self._llm.requires_warm_up:
logger.info(
"Warming up LLM, this should only run for in memory LLMs like GPT4All"
)
self._llm.invoke("Ignore this!")
def answer_question(
self,
query: str,
context_docs: list[InferenceChunk],
metrics_callback: Callable[[LLMMetricsContainer], None] | None = None,
) -> AnswerQuestionReturn:
trimmed_context_docs = _tiktoken_trim_chunks(context_docs)
prompt = self._qa_handler.build_prompt(query, trimmed_context_docs)
model_out = self._llm.invoke(prompt)
if metrics_callback is not None:
prompt_tokens = sum(
[
check_number_of_tokens(
text=p.content, encode_fn=get_default_llm_tokenizer()
)
for p in prompt
]
)
response_tokens = check_number_of_tokens(
text=model_out, encode_fn=get_default_llm_tokenizer()
)
metrics_callback(
LLMMetricsContainer(
prompt_tokens=prompt_tokens, response_tokens=response_tokens
)
)
return self._qa_handler.process_llm_output(model_out, trimmed_context_docs)
def answer_question_stream(
self,
query: str,
context_docs: list[InferenceChunk],
) -> AnswerQuestionStreamReturn:
trimmed_context_docs = _tiktoken_trim_chunks(context_docs)
prompt = self._qa_handler.build_prompt(query, trimmed_context_docs)
tokens = self._llm.stream(prompt)
yield from self._qa_handler.process_llm_token_stream(
tokens, trimmed_context_docs
)
| [
"You are a question answering system that is constantly learning and improving. ",
"Hint: Make the answer as detailed as possible and use a JSON! ",
"Question: PLACEHOLDER\n",
"Quotes can ONLY be EXACT substrings from provided documents!",
"should be in JSON format and contain an answer and (optionally) quotes that help support the answer. ",
"You can process and comprehend vast amounts of text and utilize this knowledge ",
"PLACEHOLDERCONTEXT:\n\nPLACEHOLDERPLACEHOLDER",
"You ALWAYS responds with only a json containing an answer and quotes that support the answer.\n",
"Start by reading the following documents and responding with \"Acknowledged\".",
"Your responses should be informative, detailed, and consider all possibilities and edge cases. ",
"Your responses are as INFORMATIVE and DETAILED as possible.\n",
"PLACEHOLDERSample response:",
"You MUST respond in the following format:",
"PLACEHOLDERCONTEXT:\n\nPLACEHOLDER",
"If you don't know the answer, respond with 'PLACEHOLDER'\n",
"Hint: Make the answer as DETAILED as possible and respond in JSON format!\n",
"Quotes MUST be EXACT substrings from provided documents!",
"PLACEHOLDER PLACEHOLDER\n",
"Use the following pieces of context to answer the users question. Your response ",
"to provide accurate and detailed answers to diverse queries.\n",
"Acknowledged"
] |
2024-01-10 | YetAnotherJonWilson/danswer-for-solid | backend~danswer~direct_qa~answer_question.py | from collections.abc import Callable
from sqlalchemy.orm import Session
from danswer.chunking.models import InferenceChunk
from danswer.configs.app_configs import DISABLE_GENERATIVE_AI
from danswer.configs.app_configs import ENABLE_DANSWERBOT_REFLEXION
from danswer.configs.app_configs import NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL
from danswer.configs.app_configs import QA_TIMEOUT
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.datastores.document_index import get_default_document_index
from danswer.db.feedback import create_query_event
from danswer.db.models import User
from danswer.direct_qa.exceptions import OpenAIKeyMissing
from danswer.direct_qa.exceptions import UnknownModelError
from danswer.direct_qa.llm_utils import get_default_qa_model
from danswer.direct_qa.models import LLMMetricsContainer
from danswer.direct_qa.qa_utils import get_usable_chunks
from danswer.search.danswer_helper import query_intent
from danswer.search.keyword_search import retrieve_keyword_documents
from danswer.search.models import QueryFlow
from danswer.search.models import RerankMetricsContainer
from danswer.search.models import RetrievalMetricsContainer
from danswer.search.models import SearchType
from danswer.search.semantic_search import chunks_to_search_docs
from danswer.search.semantic_search import retrieve_ranked_documents
from danswer.secondary_llm_flows.answer_validation import get_answer_validity
from danswer.server.models import QAResponse
from danswer.server.models import QuestionRequest
from danswer.utils.logger import setup_logger
from danswer.utils.timing import log_function_time
logger = setup_logger()
@log_function_time()
def answer_qa_query(
question: QuestionRequest,
user: User | None,
db_session: Session,
disable_generative_answer: bool = DISABLE_GENERATIVE_AI,
answer_generation_timeout: int = QA_TIMEOUT,
real_time_flow: bool = True,
enable_reflexion: bool = ENABLE_DANSWERBOT_REFLEXION,
retrieval_metrics_callback: Callable[[RetrievalMetricsContainer], None]
| None = None,
rerank_metrics_callback: Callable[[RerankMetricsContainer], None] | None = None,
llm_metrics_callback: Callable[[LLMMetricsContainer], None] | None = None,
) -> QAResponse:
query = question.query
filters = question.filters
use_keyword = question.use_keyword
offset_count = question.offset if question.offset is not None else 0
logger.info(f"Received QA query: {query}")
query_event_id = create_query_event(
query=query,
selected_flow=SearchType.KEYWORD,
llm_answer=None,
user_id=user.id if user is not None else None,
db_session=db_session,
)
predicted_search, predicted_flow = query_intent(query)
if use_keyword is None:
use_keyword = predicted_search == SearchType.KEYWORD
user_id = None if user is None else user.id
if use_keyword:
ranked_chunks: list[InferenceChunk] | None = retrieve_keyword_documents(
query,
user_id,
filters,
get_default_document_index(),
retrieval_metrics_callback=retrieval_metrics_callback,
)
unranked_chunks: list[InferenceChunk] | None = []
else:
ranked_chunks, unranked_chunks = retrieve_ranked_documents(
query,
user_id,
filters,
get_default_document_index(),
retrieval_metrics_callback=retrieval_metrics_callback,
rerank_metrics_callback=rerank_metrics_callback,
)
if not ranked_chunks:
return QAResponse(
answer=None,
quotes=None,
top_ranked_docs=None,
lower_ranked_docs=None,
predicted_flow=predicted_flow,
predicted_search=predicted_search,
query_event_id=query_event_id,
)
if disable_generative_answer:
logger.debug("Skipping QA because generative AI is disabled")
return QAResponse(
answer=None,
quotes=None,
top_ranked_docs=chunks_to_search_docs(ranked_chunks),
lower_ranked_docs=chunks_to_search_docs(unranked_chunks),
# set flow as search so frontend doesn't ask the user if they want
# to run QA over more documents
predicted_flow=QueryFlow.SEARCH,
predicted_search=predicted_search,
query_event_id=query_event_id,
)
try:
qa_model = get_default_qa_model(
timeout=answer_generation_timeout, real_time_flow=real_time_flow
)
except (UnknownModelError, OpenAIKeyMissing) as e:
return QAResponse(
answer=None,
quotes=None,
top_ranked_docs=chunks_to_search_docs(ranked_chunks),
lower_ranked_docs=chunks_to_search_docs(unranked_chunks),
predicted_flow=predicted_flow,
predicted_search=predicted_search,
error_msg=str(e),
query_event_id=query_event_id,
)
# remove chunks marked as not applicable for QA (e.g. Google Drive file
# types which can't be parsed). These chunks are useful to show in the
# search results, but not for QA.
filtered_ranked_chunks = [
chunk for chunk in ranked_chunks if not chunk.metadata.get(IGNORE_FOR_QA)
]
# get all chunks that fit into the token limit
usable_chunks = get_usable_chunks(
chunks=filtered_ranked_chunks,
token_limit=NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL,
offset=offset_count,
)
logger.debug(
f"Chunks fed to LLM: {[chunk.semantic_identifier for chunk in usable_chunks]}"
)
error_msg = None
try:
d_answer, quotes = qa_model.answer_question(
query, usable_chunks, metrics_callback=llm_metrics_callback
)
except Exception as e:
# exception is logged in the answer_question method, no need to re-log
d_answer, quotes = None, None
error_msg = f"Error occurred in call to LLM - {e}" # Used in the QAResponse
if not real_time_flow and enable_reflexion and d_answer is not None:
valid = False
if d_answer.answer is not None:
valid = get_answer_validity(query, d_answer.answer)
return QAResponse(
answer=d_answer.answer if d_answer else None,
quotes=quotes.quotes if quotes else None,
top_ranked_docs=chunks_to_search_docs(ranked_chunks),
lower_ranked_docs=chunks_to_search_docs(unranked_chunks),
predicted_flow=predicted_flow,
predicted_search=predicted_search,
eval_res_valid=True if valid else False,
error_msg=error_msg,
query_event_id=query_event_id,
)
return QAResponse(
answer=d_answer.answer if d_answer else None,
quotes=quotes.quotes if quotes else None,
top_ranked_docs=chunks_to_search_docs(ranked_chunks),
lower_ranked_docs=chunks_to_search_docs(unranked_chunks),
predicted_flow=predicted_flow,
predicted_search=predicted_search,
error_msg=error_msg,
query_event_id=query_event_id,
)
| [] |
2024-01-10 | YetAnotherJonWilson/danswer-for-solid | backend~danswer~chat~chat_llm.py | from collections.abc import Callable
from collections.abc import Iterator
from uuid import UUID
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chat.chat_prompts import build_combined_query
from danswer.chat.chat_prompts import DANSWER_TOOL_NAME
from danswer.chat.chat_prompts import form_tool_followup_text
from danswer.chat.chat_prompts import form_user_prompt_text
from danswer.chat.chat_prompts import format_danswer_chunks_for_chat
from danswer.chat.tools import call_tool
from danswer.configs.app_configs import NUM_DOCUMENT_TOKENS_FED_TO_CHAT
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.configs.model_configs import GEN_AI_MAX_INPUT_TOKENS
from danswer.datastores.document_index import get_default_document_index
from danswer.db.models import ChatMessage
from danswer.db.models import Persona
from danswer.direct_qa.interfaces import DanswerAnswerPiece
from danswer.direct_qa.interfaces import DanswerChatModelOut
from danswer.direct_qa.qa_utils import get_usable_chunks
from danswer.llm.build import get_default_llm
from danswer.llm.llm import LLM
from danswer.llm.utils import get_default_llm_tokenizer
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.search.semantic_search import retrieve_ranked_documents
from danswer.utils.logger import setup_logger
from danswer.utils.text_processing import extract_embedded_json
from danswer.utils.text_processing import has_unescaped_quote
logger = setup_logger()
LLM_CHAT_FAILURE_MSG = "The large-language-model failed to generate a valid response."
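# _parse_embedded_json_streamed_response streams the "action_input" value of the
# model's JSON blob as it is generated: once the accumulated tokens contain
# '"action": "Final Answer"' and the opening quote of "action_input", subsequent
# tokens are yielded as DanswerAnswerPiece pieces until the closing quote; the full
# buffered output is always parsed once at the end into a DanswerChatModelOut
# (for tool calls, only that final object is yielded).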
def _parse_embedded_json_streamed_response(
tokens: Iterator[str],
) -> Iterator[DanswerAnswerPiece | DanswerChatModelOut]:
final_answer = False
just_start_stream = False
model_output = ""
hold = ""
finding_end = 0
for token in tokens:
model_output += token
hold += token
if (
final_answer is False
and '"action":"finalanswer",' in model_output.lower().replace(" ", "")
):
final_answer = True
if final_answer and '"actioninput":"' in model_output.lower().replace(
" ", ""
).replace("_", ""):
if not just_start_stream:
just_start_stream = True
hold = ""
if has_unescaped_quote(hold):
finding_end += 1
hold = hold[: hold.find('"')]
if finding_end <= 1:
if finding_end == 1:
finding_end += 1
yield DanswerAnswerPiece(answer_piece=hold)
hold = ""
logger.debug(model_output)
model_final = extract_embedded_json(model_output)
if "action" not in model_final or "action_input" not in model_final:
raise ValueError("Model did not provide all required action values")
yield DanswerChatModelOut(
model_raw=model_output,
action=model_final["action"],
action_input=model_final["action_input"],
)
return
def _find_last_index(
lst: list[int], max_prompt_tokens: int = GEN_AI_MAX_INPUT_TOKENS
) -> int:
"""From the back, find the index of the last element to include
before the list exceeds the maximum"""
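    # Worked example: _find_last_index([4, 3, 2], max_prompt_tokens=6) returns 1,
    # since 2 + 3 = 5 still fits but adding the 4-token message would exceed 6.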
running_sum = 0
last_ind = 0
for i in range(len(lst) - 1, -1, -1):
running_sum += lst[i]
if running_sum > max_prompt_tokens:
last_ind = i + 1
break
if last_ind >= len(lst):
raise ValueError("Last message alone is too large!")
return last_ind
def danswer_chat_retrieval(
query_message: ChatMessage,
history: list[ChatMessage],
llm: LLM,
user_id: UUID | None,
) -> str:
if history:
query_combination_msgs = build_combined_query(query_message, history)
reworded_query = llm.invoke(query_combination_msgs)
else:
reworded_query = query_message.message
# Good Debug/Breakpoint
ranked_chunks, unranked_chunks = retrieve_ranked_documents(
reworded_query,
user_id=user_id,
filters=None,
datastore=get_default_document_index(),
)
if not ranked_chunks:
return "No results found"
if unranked_chunks:
ranked_chunks.extend(unranked_chunks)
filtered_ranked_chunks = [
chunk for chunk in ranked_chunks if not chunk.metadata.get(IGNORE_FOR_QA)
]
# get all chunks that fit into the token limit
usable_chunks = get_usable_chunks(
chunks=filtered_ranked_chunks,
token_limit=NUM_DOCUMENT_TOKENS_FED_TO_CHAT,
)
return format_danswer_chunks_for_chat(usable_chunks)
def _drop_messages_history_overflow(
system_msg: BaseMessage | None,
system_token_count: int,
history_msgs: list[BaseMessage],
history_token_counts: list[int],
final_msg: BaseMessage,
final_msg_token_count: int,
) -> list[BaseMessage]:
"""As message history grows, messages need to be dropped starting from the furthest in the past.
The System message should be kept if at all possible and the latest user input which is inserted in the
prompt template must be included"""
if len(history_msgs) != len(history_token_counts):
# This should never happen
raise ValueError("Need exactly 1 token count per message for tracking overflow")
prompt: list[BaseMessage] = []
# Start dropping from the history if necessary
all_tokens = history_token_counts + [system_token_count, final_msg_token_count]
ind_prev_msg_start = _find_last_index(all_tokens)
if system_msg and ind_prev_msg_start <= len(history_msgs):
prompt.append(system_msg)
prompt.extend(history_msgs[ind_prev_msg_start:])
prompt.append(final_msg)
return prompt
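# Worked example (illustrative numbers): with history token counts [400, 300, 200],
# a 100-token system message and a 3,500-token final message under a 4,000-token input
# limit, only the 200-token history message is kept alongside the system and final
# messages (100 + 200 + 3,500 = 3,800 tokens).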
def llm_contextless_chat_answer(
messages: list[ChatMessage],
tokenizer: Callable | None = None,
system_text: str | None = None,
) -> Iterator[str]:
try:
prompt_msgs = [translate_danswer_msg_to_langchain(msg) for msg in messages]
if system_text:
tokenizer = tokenizer or get_default_llm_tokenizer()
system_tokens = len(tokenizer(system_text))
system_msg = SystemMessage(content=system_text)
message_tokens = [msg.token_count for msg in messages] + [system_tokens]
else:
message_tokens = [msg.token_count for msg in messages]
last_msg_ind = _find_last_index(message_tokens)
remaining_user_msgs = prompt_msgs[last_msg_ind:]
if not remaining_user_msgs:
raise ValueError("Last user message is too long!")
if system_text:
all_msgs = [system_msg] + remaining_user_msgs
else:
all_msgs = remaining_user_msgs
return get_default_llm().stream(all_msgs)
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
return (msg for msg in [LLM_CHAT_FAILURE_MSG]) # needs to be an Iterator
def llm_contextual_chat_answer(
messages: list[ChatMessage],
persona: Persona,
user_id: UUID | None,
tokenizer: Callable,
) -> Iterator[str]:
retrieval_enabled = persona.retrieval_enabled
system_text = persona.system_text
tool_text = persona.tools_text
hint_text = persona.hint_text
last_message = messages[-1]
previous_messages = messages[:-1]
previous_msgs_as_basemessage = [
translate_danswer_msg_to_langchain(msg) for msg in previous_messages
]
# Failure reasons include:
# - Invalid LLM output, wrong format or wrong/missing keys
# - No "Final Answer" from model after tool calling
# - LLM times out or is otherwise unavailable
# - Calling invalid tool or tool call fails
# - Last message has more tokens than model is set to accept
# - Missing user input
try:
if not last_message.message:
raise ValueError("User chat message is empty.")
# Build the prompt using the last user message
user_text = form_user_prompt_text(
query=last_message.message,
tool_text=tool_text,
hint_text=hint_text,
)
last_user_msg = HumanMessage(content=user_text)
# Count tokens once to reuse
previous_msg_token_counts = [msg.token_count for msg in previous_messages]
system_tokens = len(tokenizer(system_text)) if system_text else 0
last_user_msg_tokens = len(tokenizer(user_text))
prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage,
history_token_counts=previous_msg_token_counts,
final_msg=last_user_msg,
final_msg_token_count=last_user_msg_tokens,
)
llm = get_default_llm()
# Good Debug/Breakpoint
tokens = llm.stream(prompt)
final_result: DanswerChatModelOut | None = None
final_answer_streamed = False
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if isinstance(result, DanswerChatModelOut):
final_result = result
break
if final_answer_streamed:
return
if final_result is None:
raise RuntimeError("Model output finished without final output parsing.")
if (
retrieval_enabled
and final_result.action.lower() == DANSWER_TOOL_NAME.lower()
):
tool_result_str = danswer_chat_retrieval(
query_message=last_message,
history=previous_messages,
llm=llm,
user_id=user_id,
)
else:
tool_result_str = call_tool(final_result, user_id=user_id)
# The AI's tool calling message
tool_call_msg_text = final_result.model_raw
tool_call_msg_token_count = len(tokenizer(tool_call_msg_text))
# Create the new message to use the results of the tool call
tool_followup_text = form_tool_followup_text(
tool_output=tool_result_str,
query=last_message.message,
hint_text=hint_text,
)
tool_followup_msg = HumanMessage(content=tool_followup_text)
tool_followup_tokens = len(tokenizer(tool_followup_text))
# Drop previous messages, the drop order goes: previous messages in the history,
# the last user prompt and generated intermediate messages from this recent prompt,
# the system message, then finally the tool message that was the last thing generated
follow_up_prompt = _drop_messages_history_overflow(
system_msg=SystemMessage(content=system_text) if system_text else None,
system_token_count=system_tokens,
history_msgs=previous_msgs_as_basemessage
+ [last_user_msg, AIMessage(content=tool_call_msg_text)],
history_token_counts=previous_msg_token_counts
+ [last_user_msg_tokens, tool_call_msg_token_count],
final_msg=tool_followup_msg,
final_msg_token_count=tool_followup_tokens,
)
# Good Debug/Breakpoint
tokens = llm.stream(follow_up_prompt)
for result in _parse_embedded_json_streamed_response(tokens):
if isinstance(result, DanswerAnswerPiece) and result.answer_piece:
yield result.answer_piece
final_answer_streamed = True
if final_answer_streamed is False:
raise RuntimeError("LLM did not to produce a Final Answer after tool call")
except Exception as e:
logger.error(f"LLM failed to produce valid chat message, error: {e}")
yield LLM_CHAT_FAILURE_MSG
def llm_chat_answer(
messages: list[ChatMessage],
persona: Persona | None,
user_id: UUID | None,
tokenizer: Callable,
) -> Iterator[str]:
# Common error cases to keep in mind:
# - User asks question about something long ago, due to context limit, the message is dropped
# - Tool use gives wrong/irrelevant results, model gets confused by the noise
# - Model is too weak of an LLM, fails to follow instructions
# - Bad persona design leads to confusing instructions to the model
# - Bad configurations, too small token limit, mismatched tokenizer to LLM, etc.
if persona is None:
return llm_contextless_chat_answer(messages)
elif persona.retrieval_enabled is False and persona.tools_text is None:
return llm_contextless_chat_answer(
messages, tokenizer, system_text=persona.system_text
)
return llm_contextual_chat_answer(
messages=messages, persona=persona, user_id=user_id, tokenizer=tokenizer
)
| [] |
2024-01-10 | Dr-Hutchinson/nicolay | pages~1_%F0%9F%94%8D_RAG%20with%20Hay%20and%20Nicolay.py | import streamlit as st
import json
import pygsheets
from google.oauth2 import service_account
import re
from openai import OpenAI
import cohere
import os
import pandas as pd
import numpy as np
from datetime import datetime as dt
import time
from concurrent.futures import ThreadPoolExecutor
# version 0.3 - Experiment for making sequential API calls for semantic search.
st.set_page_config(
page_title="Nicolay: Exploring the Speeches of Abraham Lincoln with AI (version 0.2)",
layout='wide',
page_icon='🔍'
)
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
client = OpenAI()
os.environ["CO_API_KEY"]= st.secrets["cohere_api_key"]
co = cohere.Client()
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
api_sheet = gc.open('api_outputs')
api_outputs = api_sheet.sheet1
# System prompt
def load_prompt(file_name):
"""Load prompt from a file."""
with open(file_name, 'r') as file:
return file.read()
# Function to ensure prompts are loaded into session state
def load_prompts():
if 'keyword_model_system_prompt' not in st.session_state:
st.session_state['keyword_model_system_prompt'] = load_prompt('prompts/keyword_model_system_prompt.txt')
if 'response_model_system_prompt' not in st.session_state:
st.session_state['response_model_system_prompt'] = load_prompt('prompts/response_model_system_prompt.txt')
    if 'app_intro' not in st.session_state:
st.session_state['app_intro'] = load_prompt('prompts/app_intro.txt')
if 'keyword_search_explainer' not in st.session_state:
st.session_state['keyword_search_explainer'] = load_prompt('prompts/keyword_search_explainer.txt')
if 'semantic_search_explainer' not in st.session_state:
st.session_state['semantic_search_explainer'] = load_prompt('prompts/semantic_search_explainer.txt')
if 'relevance_ranking_explainer' not in st.session_state:
st.session_state['relevance_ranking_explainer'] = load_prompt('prompts/relevance_ranking_explainer.txt')
if 'nicolay_model_explainer' not in st.session_state:
st.session_state['nicolay_model_explainer'] = load_prompt('prompts/nicolay_model_explainer.txt')
# Ensure prompts are loaded
load_prompts()
# Now you can use the prompts from session state
keyword_prompt = st.session_state['keyword_model_system_prompt']
response_prompt = st.session_state['response_model_system_prompt']
app_intro = st.session_state['app_intro']
keyword_search_explainer = st.session_state['keyword_search_explainer']
semantic_search_explainer = st.session_state['semantic_search_explainer']
relevance_ranking_explainer = st.session_state['relevance_ranking_explainer']
nicolay_model_explainer = st.session_state['nicolay_model_explainer']
# Streamlit interface
st.title("Exploring RAG with Nicolay and Hay")
image_url = 'http://danielhutchinson.org/wp-content/uploads/2024/01/nicolay_hay.png'
#st.markdown(f'<img src="{image_url}" width="700">', unsafe_allow_html=True)
st.image(image_url, width=600)
st.subheader("**Navigating this App:**")
st.write("Expand the **How It Works?** box below for a walkthrough of the app. Continue to the search interface below to begin exploring Lincoln's speeches.")
with st.expander("**How It Works - Exploring RAG with Hay and Nicolay**"):
st.write(app_intro)
# Query input
with st.form("Search Interface"):
st.markdown("Enter your query below:")
user_query = st.text_input("Query")
st.write("**Search Options**:")
st.write("Note that at least one search method must be selected to perform Response and Analysis.")
perform_keyword_search = st.toggle("Weighted Keyword Search", value=True)
perform_semantic_search = st.toggle("Semantic Search", value=True)
# Always display the reranking toggle
perform_reranking = st.toggle("Response and Analysis", value=False, key="reranking")
# Display a warning message if reranking is selected without any search methods
if perform_reranking and not (perform_keyword_search or perform_semantic_search):
st.warning("Response & Analysis requires at least one of the search methods (keyword or semantic).")
with st.expander("Additional Search Options (In Development)"):
st.markdown("The Hay model will suggest keywords based on your query, but you can select your own criteria for more focused keyword search using the interface below.")
st.markdown("Weighted Keywords")
default_values = [1.0, 1.0, 1.0, 1.0, 1.0] # Default weights as floats
user_weighted_keywords = {}
for i in range(1, 6):
col1, col2 = st.columns(2)
with col1:
keyword = st.text_input(f"Keyword {i}", key=f"keyword_{i}")
with col2:
weight = st.number_input(f"Weight for Keyword {i}", min_value=0.0, value=default_values[i-1], step=0.1, key=f"weight_{i}")
if keyword:
user_weighted_keywords[keyword] = weight
# User input for year and text keywords
st.header("Year and Text Filters")
user_year_keywords = st.text_input("Year Keywords (comma-separated - example: 1861, 1862, 1863)")
#user_text_keywords = st.text_input("Text Keywords")
user_text_keywords = st.multiselect("Text Selection:", ['At Peoria, Illinois', 'A House Divided', 'Eulogy on Henry Clay', 'Farewell Address', 'Cooper Union Address', 'First Inaugural Address', 'Second Inaugural Address', 'July 4th Message to Congress', 'First Annual Message', 'Second Annual Message', 'Third Annual Message', 'Fourth Annual Message', 'Emancipation Proclamation', 'Public Letter to James Conkling', 'Gettysburg Address'])
submitted = st.form_submit_button("Submit")
if submitted:
valid_search_condition = perform_keyword_search or perform_semantic_search
if valid_search_condition:
st.subheader("Starting RAG Process: (takes about 30-60 seconds in total)")
# Load data
#lincoln_speeches_file_path = 'C:\\Users\\danie\\Desktop\\Consulting Work\\Gibson - UTSA\\lincolnbot\\script development\\nicolay_assistant\\lincoln-speeches_final_formatted.json'
lincoln_speeches_file_path = 'data/lincoln_speech_corpus.json'
keyword_frequency_file_path = 'data/voyant_word_counts.json'
lincoln_speeches_embedded = "lincoln_index_embedded.csv"
# define functions
def load_json(file_path):
with open(file_path, 'r') as file:
data = json.load(file)
return data
lincoln_data = load_json(lincoln_speeches_file_path)
keyword_data = load_json(keyword_frequency_file_path)
# Convert JSON data to a dictionary with 'text_id' as the key for easy access
lincoln_dict = {item['text_id']: item for item in lincoln_data}
        # function for looking up JSON records by 'text_id' for comparison with semantic search results
def get_source_and_summary(text_id):
# Convert numerical text_id to string format used in JSON
text_id_str = f"Text #: {text_id}"
return lincoln_dict.get(text_id_str, {}).get('source'), lincoln_dict.get(text_id_str, {}).get('summary')
def find_instances_expanded_search(dynamic_weights, original_weights, data, year_keywords=None, text_keywords=None, top_n=5):
instances = []
# original processing for text_keywords formatted as strings - however, inconsistencies in the finetuning dataset cause issues here. For now code below is used.
#text_keywords_list = [keyword.strip().lower() for keyword in text_keywords.split(',')] if text_keywords else []
if text_keywords:
if isinstance(text_keywords, list):
text_keywords_list = [keyword.strip().lower() for keyword in text_keywords]
else:
text_keywords_list = [keyword.strip().lower() for keyword in text_keywords.split(',')]
else:
text_keywords_list = []
for entry in data:
if 'full_text' in entry and 'source' in entry:
entry_text_lower = entry['full_text'].lower()
source_lower = entry['source'].lower()
summary_lower = entry.get('summary', '').lower()
keywords_lower = ' '.join(entry.get('keywords', [])).lower()
match_source_year = not year_keywords or any(str(year) in source_lower for year in year_keywords)
match_source_text = not text_keywords or any(re.search(r'\b' + re.escape(keyword.lower()) + r'\b', source_lower) for keyword in text_keywords_list)
if match_source_year and match_source_text:
total_dynamic_weighted_score = 0
keyword_counts = {}
keyword_positions = {}
combined_text = entry_text_lower + ' ' + summary_lower + ' ' + keywords_lower
for keyword in original_weights.keys():
keyword_lower = keyword.lower()
for match in re.finditer(r'\b' + re.escape(keyword_lower) + r'\b', combined_text):
count = len(re.findall(r'\b' + re.escape(keyword_lower) + r'\b', combined_text))
dynamic_weight = dynamic_weights.get(keyword, 0)
if count > 0:
keyword_counts[keyword] = count
total_dynamic_weighted_score += count * dynamic_weight
keyword_index = match.start()
original_weight = original_weights[keyword]
keyword_positions[keyword_index] = (keyword, original_weight)
if keyword_positions:
highest_original_weighted_position = max(keyword_positions.items(), key=lambda x: x[1][1])[0]
context_length = 300
start_quote = max(0, highest_original_weighted_position - context_length)
end_quote = min(len(entry_text_lower), highest_original_weighted_position + context_length)
snippet = entry['full_text'][start_quote:end_quote]
instances.append({
"text_id": entry['text_id'],
"source": entry['source'],
"summary": entry.get('summary', ''),
"quote": snippet.replace('\n', ' '),
"weighted_score": total_dynamic_weighted_score,
"keyword_counts": keyword_counts
})
instances.sort(key=lambda x: x['weighted_score'], reverse=True)
return instances[:top_n]
# Updated main search function to use expanded search
def search_with_dynamic_weights_expanded(user_keywords, json_data, year_keywords=None, text_keywords=None, top_n_results=5):
total_words = sum(term['rawFreq'] for term in json_data['corpusTerms']['terms'])
relative_frequencies = {term['term'].lower(): term['rawFreq'] / total_words for term in json_data['corpusTerms']['terms']}
inverse_weights = {keyword: 1 / relative_frequencies.get(keyword.lower(), 1) for keyword in user_keywords}
max_weight = max(inverse_weights.values())
normalized_weights = {keyword: (weight / max_weight) * 10 for keyword, weight in inverse_weights.items()}
return find_instances_expanded_search(
dynamic_weights=normalized_weights,
original_weights=user_keywords, # Using user-provided keywords as original weights for snippet centering
data=lincoln_data,
year_keywords=year_keywords,
text_keywords=text_keywords,
top_n=top_n_results
)
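# Illustrative example of the inverse-frequency weighting above (numbers are hypothetical):
# if "emancipation" appears once per 10,000 corpus words and "government" once per 1,000, their
# inverse weights are 10,000 and 1,000; after normalizing against the largest value and scaling to
# a 0-10 range, hits on the rarer term contribute ten times as much to an entry's weighted score.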
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
response = client.embeddings.create(input=[text], model=model)
return np.array(response.data[0].embedding)
def cosine_similarity(vec1, vec2):
dot_product = np.dot(vec1, vec2)
norm_vec1 = np.linalg.norm(vec1)
norm_vec2 = np.linalg.norm(vec2)
return dot_product / (norm_vec1 * norm_vec2)
def search_text(df, user_query, n=5):
user_query_embedding = get_embedding(user_query)
df["similarities"] = df['embedding'].apply(lambda x: cosine_similarity(x, user_query_embedding))
top_n = df.sort_values("similarities", ascending=False).head(n)
return top_n, user_query_embedding
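# Example usage (illustrative only; assumes df already has an 'embedding' column of numpy arrays):
#   top_matches, query_embedding = search_text(df, "How did Lincoln justify emancipation?", n=5)
#   top_matches holds the five rows with the highest cosine 'similarities' to the query.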
def segment_text(text, segment_size=100):
words = text.split()
return [' '.join(words[i:i+segment_size]) for i in range(0, len(words), segment_size)]
def compare_segments_with_query_parallel(segments, query_embedding):
with ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(get_embedding, segment) for segment in segments]
segment_embeddings = [future.result() for future in futures]
return [(segments[i], cosine_similarity(segment_embeddings[i], query_embedding)) for i in range(len(segments))]
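# Example usage (illustrative): find the best ~100-word passage of a speech for a query embedding.
#   scores = compare_segments_with_query_parallel(segment_text(speech_text), query_embedding)
#   best_passage, best_score = max(scores, key=lambda x: x[1])
# Here speech_text and query_embedding are placeholders for a speech's full text and an embedding
# returned by get_embedding().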
def extract_full_text(record):
marker = "Full Text:\n"
# Check if the record is a string
if isinstance(record, str):
# Finding the position where the 'Full Text' starts
marker_index = record.find(marker)
if marker_index != -1:
# Extracting text starting from the position after the marker
return record[marker_index + len(marker):].strip()
else:
return ""
else:
# Handle cases where the record is NaN or None
return ""
def remove_duplicates(search_results, semantic_matches):
combined_results = pd.concat([search_results, semantic_matches])
#st.write("Before Deduplication:", combined_results.shape)
deduplicated_results = combined_results.drop_duplicates(subset='text_id')
#st.write("After Deduplication:", deduplicated_results.shape)
return deduplicated_results
def format_reranked_results_for_model_input(reranked_results):
formatted_results = []
# Limiting to the top 3 results
top_three_results = reranked_results[:3]
for result in top_three_results:
formatted_entry = f"Match {result['Rank']}: " \
f"Search Type - {result['Search Type']}, " \
f"Text ID - {result['Text ID']}, " \
f"Source - {result['Source']}, " \
f"Summary - {result['Summary']}, " \
f"Key Quote - {result['Key Quote']}, " \
f"Relevance Score - {result['Relevance Score']:.2f}"
formatted_results.append(formatted_entry)
return "\n\n".join(formatted_results)
# Function to get the full text from the Lincoln data based on text_id for final display of matching results
def get_full_text_by_id(text_id, data):
return next((item['full_text'] for item in data if item['text_id'] == text_id), None)
# Function to highlight truncated quotes for Nicolay model outputs
def highlight_key_quote(text, key_quote):
    # Key quotes may be truncated with an ellipsis ("beginning ... ending"); split into the stable
    # beginning and ending parts so the full span can be matched in the source text.
    parts = key_quote.split("...")
if len(parts) >= 2:
# Construct a regex pattern with the stable beginning and end, allowing for optional punctuation and spaces
pattern = re.escape(parts[0]) + r"\s*.*?\s*" + re.escape(parts[-1]) + r"[.;,]?"
else:
# If there's no '...', use the entire quote with optional punctuation and spaces at the end
pattern = re.escape(key_quote) + r"\s*[.;,]?"
# Compile the regex pattern for efficiency
regex = re.compile(pattern, re.IGNORECASE)
# Find all matches
matches = regex.findall(text)
# Replace matches with highlighted version
for match in matches:
text = text.replace(match, f"<mark>{match}</mark>")
return text
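# Logs Hay's raw model output and a timestamp to the 'api_outputs' Google Sheet via pygsheets.
# Note: it reads the module-level `msg` and `gc` objects rather than taking them as parameters.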
def record_api_outputs():
now = dt.now()
d1 = {'msg':[msg], 'date':[now]}
df1 = pd.DataFrame(data=d1, index=None)
sh1 = gc.open('api_outputs')
wks1 = sh1[0]
cells1 = wks1.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row1 = len(cells1)
api_outputs = api_sheet.sheet1
wks1.set_dataframe(df1,(end_row1+1,1), copy_head=False, extend=True)
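# --- Retrieval-augmented generation pipeline ---
# 1. Hay (a finetuned GPT-3.5 model) drafts an initial answer and proposes weighted, year, and text keywords.
# 2. The keywords drive a dynamically weighted keyword search; the query plus Hay's answer drives a
#    HyDE-style semantic search over the embedded Lincoln corpus.
# 3. Results from both searches are deduplicated and reranked with Cohere's rerank endpoint.
# 4. Nicolay (a second finetuned model) composes the final answer and analysis from the top-ranked matches.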
if user_query:
# Construct the messages for the model
messages_for_model = [
{"role": "system", "content": keyword_prompt},
{"role": "user", "content": user_query}
]
# Send the messages to the fine-tuned model
response = client.chat.completions.create(
model="ft:gpt-3.5-turbo-1106:personal::8XtdXKGK", # Replace with your fine-tuned model
messages=messages_for_model,
temperature=0,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
msg = response.choices[0].message.content
record_api_outputs()
# Parse the response to extract generated keywords
api_response_data = json.loads(msg)
initial_answer = api_response_data['initial_answer']
model_weighted_keywords = api_response_data['weighted_keywords']
model_year_keywords = api_response_data['year_keywords']
model_text_keywords = api_response_data['text_keywords']
# Check if user provided any custom weighted keywords
if user_weighted_keywords:
# Use user-provided keywords
weighted_keywords = user_weighted_keywords
year_keywords = user_year_keywords.split(',') if user_year_keywords else []
text_keywords = user_text_keywords if user_text_keywords else []
else:
# Use model-generated keywords
weighted_keywords = model_weighted_keywords
year_keywords = model_year_keywords
text_keywords = model_text_keywords
with st.expander("**Hay's Response**", expanded=True):
st.markdown(initial_answer)
st.write("**How Does This Work?**")
st.write("The Initial Response based on the user quer is given by Hay, a finetuned large language model. This response helps Hay steer in the search process by guiding the selection of weighted keywords and informing the semantic search over the Lincoln speech corpus. Compare the Hay's Response Answer with Nicolay's Response and Analysis and the end of the RAG process to see how AI techniques can be used for historical sources.")
# Use st.columns to create two columns
col1, col2 = st.columns(2)
# Display keyword search results in the first column
with col1:
# Perform the dynamically weighted search
if perform_keyword_search:
search_results = search_with_dynamic_weights_expanded(
user_keywords=weighted_keywords,
json_data=keyword_data,
year_keywords=year_keywords,
text_keywords=text_keywords,
top_n_results=5 # You can adjust the number of results
)
st.markdown("### Keyword Search Results")
with st.expander("**How Does This Work?: Dynamically Weighted Keyword Search**"):
st.write(keyword_search_explainer)
for idx, result in enumerate(search_results, start=1):
expander_label = f"**Keyword Match {idx}**: *{result['source']}* `{result['text_id']}`"
with st.expander(expander_label):
st.markdown(f"{result['source']}")
st.markdown(f"{result['text_id']}")
st.markdown(f"{result['summary']}")
st.markdown(f"**Key Quote:**\n{result['quote']}")
st.markdown(f"**Weighted Score:** {result['weighted_score']}")
st.markdown("**Keyword Counts:**")
st.json(result['keyword_counts'])
with st.expander("**Keyword Search Metadata**"):
st.write("**Keyword Search Metadata**")
st.write("**User Query:**")
st.write(user_query)
st.write("**Model Response:**")
st.write(initial_answer)
st.write("**Weighted Keywords:**")
st.json(weighted_keywords) # Display the weighted keywords
st.write("**Year Keywords:**")
st.json(year_keywords)
st.write("**Text Keywords:**")
st.json(text_keywords)
st.write("**Raw Search Results**")
st.dataframe(search_results)
st.write("**Full Model Output**")
st.write(msg)
# Display semantic search results in the second column
with col2:
if perform_semantic_search:
embedding_size = 1536
st.markdown("### Semantic Search Results")
with st.expander("**How Does This Work?: Semantic Search with HyDE**"):
st.write(semantic_search_explainer)
# Before starting the semantic search
progress_text = "Semantic search in progress."
my_bar = st.progress(0, text=progress_text)
# Initialize the match counter
match_counter = 1
df = pd.read_csv(lincoln_speeches_embedded)
df['full_text'] = df['combined'].apply(extract_full_text)
df['embedding'] = df['full_text'].apply(lambda x: get_embedding(x) if x else np.zeros(embedding_size))
#st.write("Sample text_id from DataFrame:", df['Unnamed: 0'].iloc[0])
# After calculating embeddings for the dataset
my_bar.progress(20, text=progress_text) # Update to 20% after embeddings
df['source'], df['summary'] = zip(*df['Unnamed: 0'].apply(get_source_and_summary))
#st.write("Sample source and summary from DataFrame:", df[['source', 'summary']].iloc[0])
# Perform initial semantic search, using HyDE approach
#semantic_matches, user_query_embedding = search_text(df, user_query, n=5)
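            # HyDE approach: embedding the query together with Hay's hypothetical answer tends to retrieve
            # passages that match the semantics of a plausible answer rather than just the question's wording.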
semantic_matches, user_query_embedding = search_text(df, user_query + initial_answer, n=5)
# After performing the initial semantic search
my_bar.progress(50, text=progress_text) # Update to 50% after initial search
# Loop for top semantic matches
for idx, row in semantic_matches.iterrows():
                # Update the progress bar based on how many matches have been processed so far
                # (match_counter is 1-based, so it doubles as a positional counter here)
                progress_update = 50 + (match_counter / len(semantic_matches)) * 40
                progress_update = min(progress_update, 100)  # Ensure it doesn't exceed 100
                my_bar.progress(progress_update / 100, text=progress_text)  # st.progress expects a 0.0-1.0 float here
if match_counter > 5:
break
# Updated label to include 'text_id', 'source'
semantic_expander_label = f"**Semantic Match {match_counter}**: *{row['source']}* `Text #: {row['Unnamed: 0']}`"
with st.expander(semantic_expander_label, expanded=False):
# Display 'source', 'text_id', 'summary'
st.markdown(f"**Source:** {row['source']}")
st.markdown(f"**Text ID:** {row['Unnamed: 0']}")
st.markdown(f"**Summary:**\n{row['summary']}")
# Process for finding key quotes remains the same
segments = segment_text(row['full_text']) # Use 'full_text' for segmenting
#segment_scores = compare_segments_with_query(segments, user_query_embedding)
segment_scores = compare_segments_with_query_parallel(segments, user_query_embedding)
top_segment = max(segment_scores, key=lambda x: x[1])
st.markdown(f"**Key Quote:** {top_segment[0]}")
st.markdown(f"**Similarity Score:** {top_segment[1]:.2f}")
# Increment the match counter
match_counter += 1
my_bar.progress(100, text="Semantic search completed.")
time.sleep(1)
my_bar.empty() # Remove the progress bar
with st.expander("**Semantic Search Metadata**"):
st.write("**Semantic Search Metadata**")
st.dataframe(semantic_matches)
# Reranking results with Cohere's Reranker API Endpoint
if perform_reranking:
if isinstance(search_results, list):
search_results = pd.DataFrame(search_results)
# Convert 'text_id' in search_results to numeric format
        search_results['text_id'] = search_results['text_id'].str.extract(r'(\d+)').astype(int)
# Rename the identifier column in semantic_matches to align with search_results
semantic_matches.rename(columns={'Unnamed: 0': 'text_id'}, inplace=True)
semantic_matches['text_id'] = semantic_matches['text_id'].astype(int)
deduplicated_results = remove_duplicates(search_results, semantic_matches)
all_combined_data = []
# Format deduplicated results for reranking
for index, result in deduplicated_results.iterrows():
# Check if the result is from keyword search or semantic search
if result.text_id in search_results.text_id.values and perform_keyword_search:
# Format as keyword search result
combined_data = f"Keyword|Text ID: {result.text_id}|{result.summary}|{result.quote}"
all_combined_data.append(combined_data)
elif result.text_id in semantic_matches.text_id.values and perform_semantic_search:
# Format as semantic search result
segments = segment_text(result.full_text)
segment_scores = compare_segments_with_query_parallel(segments, user_query_embedding)
top_segment = max(segment_scores, key=lambda x: x[1])
combined_data = f"Semantic|Text ID: {result.text_id}|{result.summary}|{top_segment[0]}"
all_combined_data.append(combined_data)
# Use all_combined_data for reranking
if all_combined_data:
st.markdown("### Ranked Search Results")
try:
reranked_response = co.rerank(
model='rerank-english-v2.0',
query=user_query,
documents=all_combined_data,
top_n=10
)
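                    # Cohere's reranker returns results ordered from most to least relevant, each with a
                    # relevance_score normalized to the 0-1 range.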
with st.expander("**How Does This Work?: Relevance Ranking with Cohere's Rerank**"):
st.write(relevance_ranking_explainer)
# DataFrame for storing all reranked results
full_reranked_results = []
for idx, result in enumerate(reranked_response):
combined_data = result.document['text']
data_parts = combined_data.split("|")
if len(data_parts) >= 4:
search_type, text_id_part, summary, quote = data_parts
text_id = str(text_id_part.split(":")[-1].strip())
summary = summary.strip()
quote = quote.strip()
# Retrieve source information
text_id_str = f"Text #: {text_id}"
source = lincoln_dict.get(text_id_str, {}).get('source', 'Source information not available')
# Store each result in the DataFrame
full_reranked_results.append({
'Rank': idx + 1,
'Search Type': search_type,
'Text ID': text_id,
'Source': source,
'Summary': summary,
'Key Quote': quote,
'Relevance Score': result.relevance_score
})
# Display only the top 3 results
if idx < 3:
expander_label = f"**Reranked Match {idx + 1} ({search_type} Search)**: `Text ID: {text_id}`"
with st.expander(expander_label):
st.markdown(f"Text ID: {text_id}")
st.markdown(f"{source}")
st.markdown(f"{summary}")
st.markdown(f"Key Quote:\n{quote}")
st.markdown(f"**Relevance Score:** {result.relevance_score:.2f}")
except Exception as e:
st.error("Error in reranking: " + str(e))
# Format reranked results for model input
formatted_input_for_model = format_reranked_results_for_model_input(full_reranked_results)
# Display full reranked results in an expander
with st.expander("**Result Reranking Metadata**"):
reranked_df = pd.DataFrame(full_reranked_results)
st.dataframe(reranked_df)
st.write("**Formatted Results:**")
st.write(formatted_input_for_model)
# API Call to the second GPT-3.5 model
if formatted_input_for_model:
# Construct the message for the model
messages_for_second_model = [
{"role": "system", "content": response_prompt},
{"role": "user", "content": f"User Query: {user_query}\n\n"
f"Initial Answer: {initial_answer}\n\n"
f"{formatted_input_for_model}"}
]
# Send the messages to the finetuned model
second_model_response = client.chat.completions.create(
model="ft:gpt-3.5-turbo-1106:personal::8clf6yi4", # Specific finetuned model
messages=messages_for_second_model,
temperature=0,
max_tokens=2000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Process and display the model's response
response_content = second_model_response.choices[0].message.content
if response_content: # Assuming 'response_content' is the output from the second model
model_output = json.loads(response_content)
# Displaying the Final Answer
st.header("Nicolay's Response & Analysis:")
#with st.expander("Output Debugging:"):
# st.write(response_content)
with st.expander("**How Does This Work?: Nicolay's Response and Analysis**"):
st.write(nicolay_model_explainer)
with st.expander("**Nicolay's Response**", expanded=True):
final_answer = model_output.get("FinalAnswer", {})
st.markdown(f"**Response:**\n{final_answer.get('Text', 'No response available')}")
if final_answer.get("References"):
st.markdown("**References:**")
for reference in final_answer["References"]:
st.markdown(f"{reference}")
highlight_style = """
<style>
mark {
background-color: #90ee90;
color: black;
}
</style>
"""
doc_match_counter = 0
if "Match Analysis" in model_output:
st.markdown(highlight_style, unsafe_allow_html=True)
for match_key, match_info in model_output["Match Analysis"].items():
text_id = match_info.get("Text ID")
formatted_text_id = f"Text #: {text_id}"
key_quote = match_info.get("Key Quote", "")
speech = next((item for item in lincoln_data if item['text_id'] == formatted_text_id), None)
# Increment the counter for each match
doc_match_counter += 1
if speech:
# Use the doc_match_counter in the expander label
expander_label = f"**Match {doc_match_counter}**: *{speech['source']}* `{speech['text_id']}`"
with st.expander(expander_label, expanded=False):
st.markdown(f"**Source:** {speech['source']}")
st.markdown(f"**Text ID:** {speech['text_id']}")
st.markdown(f"**Summary:**\n{speech['summary']}")
# Attempt direct highlighting
if key_quote in speech['full_text']:
formatted_full_text = speech['full_text'].replace("\\n", "<br>").replace(key_quote, f"<mark>{key_quote}</mark>")
else:
# If direct highlighting fails, use regex-based approach
formatted_full_text = highlight_key_quote(speech['full_text'], key_quote)
formatted_full_text = formatted_full_text.replace("\\n", "<br>")
st.markdown(f"**Key Quote:**\n{key_quote}")
st.markdown(f"**Full Text with Highlighted Quote:**", unsafe_allow_html=True)
st.markdown(formatted_full_text, unsafe_allow_html=True)
else:
with st.expander(f"**Match {doc_match_counter}**: Not Found", expanded=False):
st.markdown("Full text not found.")
# Displaying the Analysis Metadata
with st.expander("**Analysis Metadata**"):
# Displaying User Query Analysis
if "User Query Analysis" in model_output:
st.markdown("**User Query Analysis:**")
for key, value in model_output["User Query Analysis"].items():
st.markdown(f"- **{key}:** {value}")
# Displaying Initial Answer Review
if "Initial Answer Review" in model_output:
st.markdown("**Initial Answer Review:**")
for key, value in model_output["Initial Answer Review"].items():
st.markdown(f"- **{key}:** {value}")
                # Displaying Match Analysis
if "Match Analysis" in model_output:
st.markdown("**Match Analysis:**", unsafe_allow_html=True)
for match_key, match_info in model_output["Match Analysis"].items():
st.markdown(f"- **{match_key}:**", unsafe_allow_html=True)
for key, value in match_info.items():
if isinstance(value, dict):
nested_items_html = "<br>".join([f"  <b>{sub_key}:</b> {sub_value}" for sub_key, sub_value in value.items()])
st.markdown(f" <b>{key}:</b><br>{nested_items_html}<br>", unsafe_allow_html=True)
else:
st.markdown(f" <b>{key}:</b> {value}<br>", unsafe_allow_html=True)
# Displaying Meta Analysis
if "Meta Analysis" in model_output:
st.markdown("**Meta Analysis:**")
for key, value in model_output["Meta Analysis"].items():
st.markdown(f"- **{key}:** {value}")
# Displaying Model Feedback
if "Model Feedback" in model_output:
st.markdown("**Model Feedback:**")
for key, value in model_output["Model Feedback"].items():
st.markdown(f"- **{key}:** {value}")
st.write("**Full Model Output**:")
st.write(response_content)
else:
st.error("Search halted: Invalid search condition. Please ensure at least one search method is selected.")
| [
"response_model_system_prompt",
"keyword_model_system_prompt",
"User Query: PLACEHOLDER\n\nInitial Answer: PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | Dr-Hutchinson/nicolay | deprecations~nicolay_0.py | from langchain.prompts import PromptTemplate
from langchain.prompts import FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain import OpenAI, SerpAPIWrapper, Wikipedia
from langchain.agents import initialize_agent, Tool
from langchain.agents.react.base import DocstoreExplorer
import openai
import csv
from datetime import datetime as dt
from statistics import mean
import pandas as pd
import numpy as np
from openai.embeddings_utils import get_embedding, cosine_similarity
import os
import re
import streamlit as st
import pygsheets
from google.oauth2 import service_account
import ssl
import base64
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts import FewShotPromptTemplate
import os
import pandas as pd
import numpy as np
import re
from openai.embeddings_utils import get_embedding, cosine_similarity
import openai
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
#from IPython.display import HTML
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
st.set_page_config(
page_title="Nicolay: An AI Search Tool for the Speeches of Abraham Lincoln (version 0.0)",
layout='wide',
page_icon='🔍'
)
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
openai.api_key = os.getenv("OPENAI_API_KEY")
#os.environ["SERPAPI_API_KEY"] = st.secrets["serpapi_api_key"]
st.title("Nicolay: An AI Search Tool for the Speeches of Abraham Lincoln")
def button_one():
st.write("This application uses OpenAI's GPT AI models to answer questions about the collected speeches of Abraham Lincoln. Choose one of the options below, and pose a question about the text.")
    semantic_search = "Semantic Search: Enter a question, and receive sections of Lincoln's speeches that are the most closely related."
#ask_a_paragraph = "Ask a Paragraph: Internet Search. Select a Section from the text, and then pose a question. GPT-3 will search the internet to answer your question."
#ask_wiki = "Ask a Paragraph: Wikipedia. Select a Section from the text, and then pose a question. GPT-3 will search Wikipedia to answer your question."
ask_a_source = "Ask Nicolay: Pose a question about Lincoln's speeches, and a GPT AI model will share answers drawn from the text."
search_method = st.radio("Choose a method:", (semantic_search, ask_a_source))
model_choice = st.selectbox("Choose an AI model:", ('GPT-3.5', 'GPT-4'), index=1)
#section_number = st.number_input('Select a section number if you have selected Ask a Paragraph. You can find the section numbers to the bottom left, or through a semantic search.', step=1)
submission_text = st.text_area("Enter your question below. ")
submit_button_1 = st.button(label='Click here to submit your question.')
if submit_button_1:
st.subheader("Nicolay's analysis is underway. It can take a minute or two for every step of the process to be completed, so thank you for your patience. Nicolay's progress will be documented below.")
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
if model_choice == 'GPT-3.5':
model_select = 'gpt-3.5-turbo'
else:
model_select = 'gpt-4'
# semantic search via text embeddings with OpenAI Ada embedding model
datafile_path = "lincoln_index_embedded.csv"
df = pd.read_csv(datafile_path)
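        # The CSV stores each embedding as a stringified Python list; eval() converts it back to a list
        # and np.array() makes it usable for the cosine-similarity calculations below.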
df["embedding"] = df.embedding.apply(eval).apply(np.array)
def embeddings_search():
# try this
datafile_path = "lincoln_index_embedded.csv"
df = pd.read_csv(datafile_path)
df["embedding"] = df.embedding.apply(eval).apply(np.array)
def search_text(df, product_description, n=3, pprint=True):
product_embedding = get_embedding(
product_description,
engine="text-embedding-ada-002"
)
df["similarities"] = df.embedding.apply(lambda x: cosine_similarity(x, product_embedding))
# Select the first three rows of the sorted DataFrame
top_three = df.sort_values("similarities", ascending=False).head(3)
# If `pprint` is True, print the output
#if pprint:
#for _, row in top_three.iterrows():
#print(row["combined"])
#print()
# Return the DataFrame with the added similarity values
return top_three
# Call the search_text() function and store the return value in a variable
results_df = search_text(df, submission_text, n=3)
# Reset the index and create a new column "index"
results_df = results_df.reset_index()
# Access the values in the "similarities" and "combined" columns
similarity1 = results_df.iloc[0]["similarities"]
combined1 = str(results_df.iloc[0]["combined"])
similarity2 = results_df.iloc[1]["similarities"]
combined2 = str(results_df.iloc[1]["combined"])
similarity3 = results_df.iloc[2]["similarities"]
combined3 = str(results_df.iloc[2]["combined"])
num_rows = results_df.shape[0]
# Iterate through the rows of the dataframe
for i in range(num_rows):
# Get the current row
row = results_df.iloc[i]
# Create an expander for the current row, with the label set to the row number
with st.expander(label="Text Section " + str(i) + ":", expanded=True):
# Display each cell in the row as a separate block of text
st.markdown("**Question:**")
st.write(submission_text)
st.markdown("**Below is a section of the text along with its semantic similarity score. It is one of the three highest scoring sections in the text.**")
st.write(row['similarities'])
combined_text = row['combined']
text_lines = combined_text.split('\n')
#for line in text_lines:
#st.text(line)
for line in text_lines:
st.markdown(line)
# Write the DataFrame to a CSV file
#results_df.to_csv('results_df.csv', index=False, columns=["similarities", "combined"])
#st.subheader("The steps below illustrate Nicolay's reasoning on this question.")
#st.write("Step 1 complete: Nicolay identified the most semantically similar text sections.")
#st.dataframe(results_df)
#st.write("Next step: relevancy check")
def ask_nicolay():
def search_text_2(df, product_description, n=3, pprint=True):
product_embedding = get_embedding(
product_description,
engine="text-embedding-ada-002"
)
df["similarities"] = df.embedding.apply(lambda x: cosine_similarity(x, product_embedding))
# Select the first three rows of the sorted DataFrame
top_three = df.sort_values("similarities", ascending=False).head(3)
# If `pprint` is True, print the output
#if pprint:
#for _, row in top_three.iterrows():
#print(row["combined"])
#print()
# Return the DataFrame with the added similarity values
return top_three
# Q&A doc prompt with langchain with prompts for determining relevance and extracting quotations.
results_df = search_text_2(df, submission_text, n=3)
# Reset the index and create a new column "index"
results_df = results_df.reset_index()
# Access the values in the "similarities" and "combined" columns
similarity1 = results_df.iloc[0]["similarities"]
combined1 = str(results_df.iloc[0]["combined"])
similarity2 = results_df.iloc[1]["similarities"]
combined2 = str(results_df.iloc[1]["combined"])
similarity3 = results_df.iloc[2]["similarities"]
combined3 = str(results_df.iloc[2]["combined"])
st.write("Step 1 complete - identified the most semantically similar text sections.")
st.dataframe(results_df)
st.write("Next step - relevancy check.")
## k-shot prompts for relevance
currency_question = """1. Question\n\n"What points does Lincoln make about currency reform?"\n\n2. Section:\n\nText #: 58: Source: Second Annual Message. December 1, 1862\n\n"The new commercial treaty between the United States and the Sultan of Turkey has been carried into execution.\n\nA commercial and consular treaty has been negotiated, subject to the Senate's consent, with Liberia; and a similar negotiation is now pending with the republic of Hayti. A considerable improvement of the national commerce is expected to result from these measures.\n\nOur relations with Great Britain, France, Spain, Portugal, Russia, Prussia, Denmark, Sweden, Austria, the Netherlands, Italy, Rome, and the other European states, remain undisturbed. Very favorable relations also continue to be maintained with Turkey, Morocco, China and Japan.\n\nDuring the last year there has not only been no change of our previous relations with the independent states of our own continent, but, more friendly sentiments than have heretofore existed, are believed to be entertained by these neighbors, whose safety and progress, are so intimately connected with our own. This statement especially applies to Mexico, Nicaragua, Costa Rica, Honduras, Peru, and Chile.\n\nThe commission under the convention with the republic of New Granada closed its session, without having audited and passed upon, all the claims which were submitted to it. A proposition is pending to revive the convention, that it may be able to do more complete justice. The joint commission between the United States and the republic of Costa Rica has completed its labors and submitted its report.\n\nI have favored the project for connecting the United States with Europe by an Atlantic telegraph, and a similar project to extend the telegraph from San Francisco, to connect by a Pacific telegraph with the line which is being extended across the Russian empire.\n\nThe Territories of the United States, with unimportant exceptions, have remained undisturbed by the civil war, and they are exhibiting such evidence of prosperity as justifies an expectation that some of them will soon be in a condition to be organized as States, and be constitutionally admitted into the federal Union.\n\nThe immense mineral resources of some of those Territories ought to be developed as rapidly as possible. Every step in that direction would have a tendency to improve the revenues of the government, and diminish the burdens of the people. It is worthy of your serious consideration whether some extraordinary measures to promote that end cannot be adopted. The means which suggests itself as most likely to be effective, is a scientific exploration of the mineral regions in those Territories, with a view to the publication of its results at home and in foreign countries---results which cannot fail to be auspicious.\n\nThe condition of the finances will claim your most diligent consideration. The vast expenditures incident to the military and naval operations required for the suppression of the rebellion, have hitherto been met with a promptitude, and certainty, unusual in similar circumstances, and the public credit has been fully maintained. 
The continuance of the war, however, and the increased disbursements made necessary by the augmented forces now in the field, demand your best reflections as to the best modes of providing the necessary revenue, without injury to business and with the least possible burdens upon labor.\nThe suspension of specie payments by the banks, soon after the commencement of your last session, made large issues of United States notes unavoidable. In no other way could the payment of the troops, and the satisfaction of other just demands, be so economically, or so well provided for. The judicious legislation of Congress, securing the receivability of these notes for loans and internal duties, and making them a legal tender for other debts , has made them an universal currency; and has satisfied, partially, at least, and for the time, the long felt want of an uniform circulating medium, saving thereby to the people, immense sums in discounts and exchanges.\n\nA return to specie payments, however, at the earliest period compatible with due regard to all interests concerned, should ever be kept in view. Fluctuations in the value of currency are always injurious, and to reduce these fluctuations to the lowest possible point will always be a leading purpose in wise legislation. Convertibility, prompt and certain convertibility into coin, is generally acknowledged to be the best and surest safeguard against them; and it is extremely doubtful whether a circulation of United States notes, payable in coin, and sufficiently large for the wants of the people, can be permanently, usefully and safely maintained.\n\nIs there, then, any other mode in which the necessary provision for the public wants can be made, and the great advantages of a safe and uniform currency secured?\n\nI know of none which promises so certain results, and is, at the same time, so unobjectionable, as the organization of banking associations, under a general act of Congress, well guarded in its provisions.\n\nTo such associations the government might furnish circulating notes, on the security of United States bonds deposited in the treasury. These notes, prepared under the supervision of proper officers, being uniform in appearance and security, and convertible always into coin, would at once protect labor against the evils of a vicious currency, and facilitate commerce by cheap and safe exchanges.\n\nA moderate reservation from the interest on the bonds would compensate the United States for the preparation and distribution of the notes and a general supervision of the system, and would lighten the burden of that part of the public debt employed as securities. The public credit, moreover, would be greatly improved, and the negotiation of new loans greatly facilitated by the steady market demand for government bonds which the adoption of the proposed system would create."\n\nSummary: In his Second Annual Message, Abraham Lincoln discusses the execution of the new commercial treaty between the United States and the Sultan of Turkey, as well as commercial and consular treaties with Liberia and Haiti. He describes the favorable relations maintained with European and other foreign states and the improved relations with neighboring countries in the Americas. Lincoln also addresses the financial situation, noting the suspension of specie payments and the introduction of United States notes as a temporary measure. 
He suggests the organization of banking associations under an act of Congress as a solution for providing public funds and a safe, uniform currency. Furthermore, he mentions the importance of developing the mineral resources in the Territories and the potential benefits of an Atlantic telegraph connecting the United States with Europe.\nKeywords: Abraham Lincoln, Second Annual Message, December 1 1862, commercial treaty, Sultan of Turkey, Liberia, Haiti, foreign relations, Americas, finances, suspension of specie payments, United States notes, banking associations, mineral resources, Territories, Atlantic telegraph.\n\n3. Key Words:”\n\ncurrency reform\n\n4. Relevance Determination: Section 58: Relevant\n\n5. Relevance Explanation: The section is directly and specifically relevant to the question because it contains key words such as "specie payments” and "banking associations" which are directly related to the question. Additionally, the background knowledge and context of the speech provide further evidence of the section's relevance to the question."""
railroad_question = """2. Section: \n\nText #: 71: Source: Third Annual Message. December 8, 1863.\n\n"But why any proclamation now upon this subject? This question is beset with the conflicting views that the step might be delayed too long or be taken too soon. In some States the elements for resumption seem ready for action, but remain inactive, apparently for want of a rallying point---a plan of action. Why shall A adopt the plan of B, rather than B that of A? And if A and B should agree, how can they know but that the general government here will reject their plan? By the proclamation a plan is presented which may be accepted by them as a rallying point, and which they are assured in advance will not be rejected here. This may bring them to act sooner than they otherwise would.\n\nThe objections to a premature presentation of a plan by the national Executive consists in the danger of committals on points which could be more safely left to further developments. Care has been taken to so shape the document as to avoid embarrassments from this source. Saying that, on certain terms, certain classes will be pardoned, with rights restored, it is not said that other classes, or other terms, will never be included. Saying that reconstruction will be accepted if presented in a specified way, it is not said it will never be accepted in any other way.\n\nThe movements, by State action, for emancipation in several of the States, not included in the emancipation proclamation, are matters of profound gratulation. And while I do not repeat in detail what I have hertofore so earnestly urged upon this subject, my general views and feelings remain unchanged; and I trust that Congress will omit no fair opportunity of aiding these important steps to a great consummation.\n\nIn the midst of other cares, however important, we must not lose sight of the fact that the war power is still our main reliance. To that power alone can we look, yet for a time, to give confidence to the people in the contested regions, that the insurgent power will not again overrun them. Until that confidence shall be established, little can be done anywhere for what is called reconstruction. Hence our chiefest care must still be directed to the army and navy, who have thus far borne their harder part so nobly and well. And it may be esteemed fortunate that in giving the greatest efficiency to these indispensable arms, we do also honorably recognize the gallant men, from commander to sentinel, who compose them, and to whom, more than to others, the world must stand indebted for the home of freedom disenthralled, regenerated, enlarged, and perpetuated."\n\nSummary: In this portion of the Third Annual Message, President Lincoln addresses the importance of presenting a plan for the resumption of national authority within States where it has been suspended. He argues that by providing a rallying point, states can act sooner to initiate reconstruction. He also expresses his satisfaction with the movements towards emancipation in states not covered by the Emancipation Proclamation and urges Congress to support these efforts. Lincoln emphasizes that the war power, represented by the army and navy, is still the primary means to establish confidence in contested regions and prevent the insurgent power from overrunning them. 
He acknowledges the essential role of the military in securing freedom and promoting reconstruction.\n\nKeywords: Third Annual Message, December 8, 1863, Abraham Lincoln, national authority, reconstruction, rallying point, Emancipation Proclamation, war power, army, navy, contested regions, insurgent power, freedom.\n\n4. Relevance Determination: Section_71: Irrelevant\n\n5. Relevance Explanation: The Section is irrelevant because it does not address the user's question about points Lincoln makes on railroad construction. The content of the Section focuses on other topics such as national authority, reconstruction, emancipation, and the role of the military during the Civil War."""
examples = [
{"question": "1. Question: What points does Lincoln make about currency reform?", "output": currency_question},
{"question": "1. Question: What points does Lincoln make about railroad construction?", "output": railroad_question}
]
prompt_instructions = "You are an AI expert on presidential history with a speciality on the life and times of Abraham Lincoln. In this exercise you are given a user supplied question, a Section of the Text, a Semantic Similarity Score, and a Method for determining the Section’s relevance to the Question. Your objective is to determine whether that Section of the text is directly and specifically relevant to the user question. You will use the Method below to fulfill this objective, taking each step by step.\n\nHere is your Method.\nMethod: Go step by step in answering the question.\n1. Question: You will be provided with a user question.\n2. Section: You will be given a section of the text from a speech by Abraham Lincoln, accompanied by a summary of that section and keywords associated with the text.\n3. Key Words: Identify key words in the Section that are specifically and directly related to the Question. Such key words could include specific locations, events, or people mentioned in the Section.\n4. Relevance Determination: Based on your review of the earlier steps in the Method, determine whether the section is relevant, and gauge your confidence (high, medium, low, or none) in this determination. High determination is specifically and directly related to the Question. If the section is relevant and ranked high, write ‘'Section_x: Relevant'. Otherwise, if the section is not relevant and the determination is less than high, write 'Section_x: Irrelevant'.\n5. Relevance Explanation: Based on your review in the earlier steps in the Method, explain why the Section’s relevance to the Question.\nLet’s begin.\n"
example_prompt = SystemMessagePromptTemplate.from_template(prompt_instructions)
human_message_prompt = HumanMessagePromptTemplate.from_template("Question: {question}\nKey Terms:")
chat_prompt = ChatPromptTemplate.from_messages([example_prompt, human_message_prompt])
chat = ChatOpenAI(temperature=0, model_name=model_select)
chain = LLMChain(llm=chat, prompt=chat_prompt)
r_check_1 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined1))
#print(r_check_1)
r_check_2 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined2))
#print(r_check_2)
r_check_3 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined3))
            #print(r_check_3)
            st.write("Step 2 complete: Nicolay has made relevancy checks on the text sections.")
# combined function for combining sections + outputs, and then filtering via regex for relevant sections
combined_df = pd.DataFrame(columns=['output', 'r_check'])
combined_df['output'] = [combined1, combined2, combined3]
combined_df['r_check'] = [r_check_1, r_check_2, r_check_3]
st.dataframe(combined_df)
st.write("Next step: Answer the user question with supporting quotations from the relevant texts.")
# Use the re.IGNORECASE flag to make the regular expression case-insensitive
regex = re.compile(r'Relevance Explanation.*?(relevant)', re.IGNORECASE | re.DOTALL)
# Apply the regex pattern to the 'r_check' column and store the results in a new 'mask' column
combined_df['mask'] = combined_df['r_check'].str.contains(regex)
# Create a second mask to capture "this is relevant"
combined_df['second_mask'] = combined_df['r_check'].str.contains(r'this section is relevant', flags=re.IGNORECASE)
# Combine the two masks using the bitwise OR operator (|) and store the result in the 'mask' column
combined_df['mask'] = combined_df['mask'] | combined_df['second_mask']
# Filter the combined dataframe to include only rows where the 'mask' column is True
relevant_df = combined_df.loc[combined_df['mask']].copy()
# Check if there are any rows in the relevant_df dataframe
if relevant_df.empty:
# If there are no rows, print the desired message
st.write("No relevant sections identified.")
else:
# Otherwise, continue with the rest of the script
def combine_strings(row):
return row['output'] + '\nKey Terms\n' + row['r_check']
# Use the apply function to apply the combine_strings function to each row of the relevant_df dataframe
# and assign the result to the 'combined_string' column
relevant_df['combined_string'] = relevant_df.apply(combine_strings, axis=1)
final_sections = relevant_df['combined_string']
#final_sections.to_csv('final_sections.csv')
evidence_df = pd.DataFrame(final_sections)
evidence = '\n\n'.join(evidence_df['combined_string'])
evidence_df.to_csv('evidence.csv')
#print(evidence)
# Filter the relevant_df dataframe to include only the 'output' column
output_df = relevant_df[['output']]
# Convert the dataframe to a dictionary
output_dict = output_df.to_dict('records')
# Extract the values from the dictionary using a list comprehension
output_values = [d['output'] for d in output_dict]
# Print the output values to see the results
#print(output_values)
# begin quotation identiftication and answer prompt
cherokee_question = "1. Question: How did Lincoln regard the Cherokee?\n\n2. Text:\n\n\nText Number: 59\nSource: Second Annual Message. December 1, 1863\n\nThe Indian tribes upon our frontiers have, during the past year, manifested a spirit of insubordination, and, at several points, have engaged in open hostilities against the white settlements in their vicinity. The tribes occupying the Indian country south of Kansas, renounced their allegiance to the United States, and entered into treaties with the insurgents. Those who remained loyal to the United States were driven from the country. The chief of the Cherokees has visited this city for the purpose of restoring the former relations of the tribe with the United States. He alleges that they were constrained, by superior force, to enter into treaties with the insurgents, and that the United States neglected to furnish the protection which their treaty stipulations required.\n\n3. Compose Initial Answer: Lincoln regarded the Cherokee as a tribe that had been forced into renouncing their allegiance to the United States and entering into treaties with the Confederacy due to superior force and neglect on the part of the United States.\n\n4. Identify Supporting Quote: \"The chief of the Cherokees has visited this city for the purpose of restoring the former relations of the tribe with the United States. He alleges that they were constrained, by superior force, to enter into treaties with the insurgents, and that the United States neglected to furnish the protection which their treaty stipulations required.\" (Second Annual Message. December 1, 1863. (Text 59)\n\n5. Combined Answer with Supporting Quote: Lincoln discusses the Cherokee in his Second Annual Message of December 1, 1863. Lincoln notes notes the visit of the Cherokee chief to Washington D.C. “for the purpose of restoring the former relations of the tribe with the United States.” The Cherokee were “constrained, by superior force, to enter into treaties with the Confederacy.” Furthermore, the chief alleged “that the United States neglected to furnish the protection which their treaty stipulations required.” (Second Annual Message. December 1, 1863. Text 59)\n",
japan_question = "1. Question: Does Lincoln discuss Japan, and if so, what is the context of this discussion?\n\n2. Text\n\nText #: 72: \nSource: Fourth Annual Message. December 6, 1864.\n\nOwing to the peculiar situation of Japan, and the anomalous form of its government, the action of that empire in performing treaty stipulations is inconstant and capricious. Nevertheless, good progress has been effected by the western powers, moving with enlightened concert. Our own pecuniary claims have been allowed, or put in course of settlement, and the inland sea has been reopened to commerce. There is reason also to believe that these proceedings have increased rather than diminished the friendship of Japan towards the United States.\n\nCompose Initial Answer: Yes, Lincoln discusses Japan in his Fourth Annual Message of December 6, 1854, stating that the peculiar situation and anomalous form of government of Japan have made their actions in performing treaty stipulations inconsistent and capricious. However, he notes that progress has been made by the western powers in working together and that our own pecuniary claims have been allowed or put in settlement. Additionally, the inland sea has been reopened to commerce, and these proceedings have likely increased Japan's friendship towards the United States.\n\nIdentify Supporting Quote: \"Owing to the peculiar situation of Japan, and the anomalous form of its government, the action of that empire in performing treaty stipulations is inconstant and capricious... There is reason also to believe that these proceedings have increased rather than diminished the friendship of Japan towards the United States.\" (Fourth Annual Message, December 6, 1864. Text Number 72).\n\nCombined Answer with Supporting Quote: Yes, Lincoln discusses Japan in his Fourth Annual Message of December 6, 1864. Lincoln acknowledged that \"the action of [Japan] in performing treaty stipulations is inconstant and capricious\" due to their \"peculiar situation\" and \"anomalous form of government.\" However, he also noted that \"good progress has been effected by the western powers, moving with enlightened concert,\" as evidenced by the settlement of the United States' pecuniary claims and the reopening of the inland sea to commerce. Lincoln further suggested that these efforts had \"increased rather than diminished the friendship of Japan towards the United States.\" Thus, this message reflects Lincoln's recognition of the importance of international cooperation and diplomacy in navigating complex political and cultural landscapes such as that of Japan during the Late Tokugawa period. (Fourth Annual Message, December 6, 1864. Text Number 72).\n"
examples = [
{"question": "1. Question: How did Lincoln regard the Cherokee?", "output": cherokee_question},
{"question": "1. Question: Does Lincoln discuss Japan, and if so, what is the context of this discussion?", "output": japan_question}
]
prompt_instructions ="You are an AI question-answerer and quotation-selector. The focus of your expertise is interpreting the historic writings of Abraham Lincoln. In this exercise you will first be given a user question, a Section of a Lincoln writing, and a Method for answering the question and supporting it with an appropriate quotation from the Section. In following this Method you will complete each step by step until finished.\nHere is your Method.\nMethod: Go step by step in the question.\n1. Question: You will be provided with a user question.\n2. Text: You will be given a section from a Text written by Abraham Lincoln. The Text contains the Text Number, the Source of the Text, and the original prose by Lincoln. \n3. Compose Initial Answer: Based on the Question and information provided in the Text, compose a historically accurate Initial Answer to that Question. The Initial Answer should be incisive, brief, and well-written.\n4. Identify Supporting Quote: Based on the Answer, select a Supporting Quote from the Text that supports that Answer. Select the briefest and most relevant Supporting Quote possible. You can also use paraphrasing to further shorten the Supporting Quote. Provide a citation at the end of the Supporting Quote, in the following manner: (Source, Text Number).\n5. Combined Answer with Supporting Quote: Rewrite the Initial Answer to incorporate the Supporting Quote. This Combined Answer should be historically accurate, and demonstrating a writing style that is incisive, brief, and well-written. All Quotes used should be cited using the method above.\n\nLet’s begin.\n"
example_prompt = SystemMessagePromptTemplate.from_template(prompt_instructions)
human_message_prompt = HumanMessagePromptTemplate.from_template("Question: {question}\nKey Terms:")
chat_prompt = ChatPromptTemplate.from_messages([example_prompt, human_message_prompt])
chat = ChatOpenAI(temperature=0, model_name=model_select)
chain = LLMChain(llm=chat, prompt=chat_prompt)
# Create an empty list to store the final_analysis results
final_analysis_results = []
# Iterate over the output_values list
for output_value in output_values:
# Run the final_analysis step and store the result in a variable
final_analysis = chain.run(submission_text+output_value)
# Add the final_analysis result to the list
final_analysis_results.append(final_analysis)
# Create a Pandas dataframe from the output_values list
final_analysis_df = pd.DataFrame({'output_values': output_values, 'final_analysis': final_analysis_results})
# Save the dataframe to a CSV file
final_analysis_df.to_csv('final_analysis.csv', index=False)
st.subheader("Nicolay's Final Analysis:")
st.write("Step 3 complete: Here are Nicolay's analysis of Lincoln's speeches based on your question. Click on the dataframe boxes below to see the full outputs.")
st.dataframe(final_analysis_df)
st.write('\n\n')
#for result in final_analysis_results:
#print(result)
#def rearrange_combined_output(combined_output):
#lines = combined_output.strip().split("\n")
#source_line = next(line for line in lines if "Source:" in line)
#source_line_parts = source_line.split(":", 2)
#source_line_formatted = f"{source_line_parts[0]}:{source_line_parts[1]}\n{source_line_parts[2].strip()}"
#summary_line_start = next(i for i, line in enumerate(lines) if "Summary:" in line)
#keywords_line_start = next(i for i, line in enumerate(lines) if "Keywords:" in line)
#summary_lines = lines[summary_line_start:keywords_line_start]
#keywords_line = lines[keywords_line_start]
#full_text_start = next(i for i, line in enumerate(lines) if "Text" in line) + 1
#full_text_end = summary_line_start - 1
#full_text = "\n".join(lines[full_text_start:full_text_end])
#source_data = f"""Source Data:
#{source_line_formatted}
#{' '.join(summary_lines)}
#{keywords_line}
#Full Text:
#{full_text}
#"""
#return source_data
#source_data = rearrange_combined_output(combined1)
#print(source_data)
if search_method == semantic_search:
embeddings_search()
else:
ask_nicolay()
def button_two():
#Rank Bacon_bot Responses
with col1:
st.write("Rank the AI's Interpretation:")
sh1 = gc.open('AAS_temp')
wks1 = sh1[0]
submission_text = wks1.get_value('F2')
output = wks1.get_value('G2')
prompt_text = wks1.get_value('D2')
st.subheader('Your Question')
st.write(submission_text)
st.subheader("The AI's Answer:")
        st.write(output)
st.subheader("The AI's Interpretation:")
with st.form('form2'):
            accuracy_score = st.slider("Is the AI's answer accurate?", 0, 10, key='accuracy')
text_score = st.slider("Are the text sections the AI selected appropriate to the question?", 0, 10, key='text')
interpretation_score = st.slider("How effective was the AI's interpretation of the texts?", 0, 10, key='interpretation')
coherence_rank = st.slider("How coherent and well-written is the reply?", 0,10, key='coherence')
st.write("Transmitting the rankings takes a few moments. Thank you for your patience.")
submit_button_2 = st.form_submit_button(label='Submit Ranking')
if submit_button_2:
sh1 = gc.open('AAS_outputs_temp')
wks1 = sh1[0]
df = wks1.get_as_df(has_header=True, index_column=None, start='A1', end=('K2'), numerize=False)
name = df['user'][0]
submission_text = df['question'][0]
output = df['initial_analysis'][0]
combined_df = df['combined_df'][0]
relevant_texts = df['evidence'][0]
now = dt.now()
ranking_score = [accuracy_score, text_score, interpretation_score, coherence_rank]
ranking_average = mean(ranking_score)
def ranking_collection():
d4 = {'user':["0"], 'user_id':[user_id],'question':[submission_text], 'output':[initial_analysis], 'accuracy_score':[accuracy_score], 'text_score':[text_score],'interpretation_score':[interpretation_score], 'coherence':[coherence_rank], 'overall_ranking':[ranking_average], 'date':[now]}
df4 = pd.DataFrame(data=d4, index=None)
sh4 = gc.open('AAS_rankings')
wks4 = sh4[0]
cells4 = wks4.get_all_values(include_tailing_empty_rows=False, include_tailing_empty=False, returnas='matrix')
end_row4 = len(cells4)
wks4.set_dataframe(df4,(end_row4+1,1), copy_head=False, extend=True)
ranking_collection()
            st.write('Rankings recorded - thank you! Feel free to continue your conversation with Nicolay.')
st.write("Select the 'Ask Bacon' button to ask the AI questions. Select 'Rank Bacon' to note your impressions of its responses.")
pages = {
0 : button_one,
1 : button_two,
}
if "current" not in st.session_state:
st.session_state.current = None
if st.button("Ask Nicolay"):
st.session_state.current = 0
if st.button("Ignore This Button"):
st.session_state.current = 1
if st.session_state.current != None:
pages[st.session_state.current]()
| [
"You are an AI question-answerer and quotation-selector. The focus of your expertise is interpreting the historic writings of Abraham Lincoln. In this exercise you will first be given a user question, a Section of a Lincoln writing, and a Method for answering the question and supporting it with an appropriate quotation from the Section. In following this Method you will complete each step by step until finished.\nHere is your Method.\nMethod: Go step by step in the question.\n1. Question: You will be provided with a user question.\n2. Text: You will be given a section from a Text written by Abraham Lincoln. The Text contains the Text Number, the Source of the Text, and the original prose by Lincoln. \n3. Compose Initial Answer: Based on the Question and information provided in the Text, compose a historically accurate Initial Answer to that Question. The Initial Answer should be incisive, brief, and well-written.\n4. Identify Supporting Quote: Based on the Answer, select a Supporting Quote from the Text that supports that Answer. Select the briefest and most relevant Supporting Quote possible. You can also use paraphrasing to further shorten the Supporting Quote. Provide a citation at the end of the Supporting Quote, in the following manner: (Source, Text Number).\n5. Combined Answer with Supporting Quote: Rewrite the Initial Answer to incorporate the Supporting Quote. This Combined Answer should be historically accurate, and demonstrating a writing style that is incisive, brief, and well-written. All Quotes used should be cited using the method above.\n\nLet’s begin.\n",
"[PLACEHOLDER, PLACEHOLDER]",
"You are an AI expert on presidential history with a speciality on the life and times of Abraham Lincoln. In this exercise you are given a user supplied question, a Section of the Text, a Semantic Similarity Score, and a Method for determining the Section’s relevance to the Question. Your objective is to determine whether that Section of the text is directly and specifically relevant to the user question. You will use the Method below to fulfill this objective, taking each step by step.\n\nHere is your Method.\nMethod: Go step by step in answering the question.\n1. Question: You will be provided with a user question.\n2. Section: You will be given a section of the text from a speech by Abraham Lincoln, accompanied by a summary of that section and keywords associated with the text.\n3. Key Words: Identify key words in the Section that are specifically and directly related to the Question. Such key words could include specific locations, events, or people mentioned in the Section.\n4. Relevance Determination: Based on your review of the earlier steps in the Method, determine whether the section is relevant, and gauge your confidence (high, medium, low, or none) in this determination. High determination is specifically and directly related to the Question. If the section is relevant and ranked high, write ‘'Section_x: Relevant'. Otherwise, if the section is not relevant and the determination is less than high, write 'Section_x: Irrelevant'.\n5. Relevance Explanation: Based on your review in the earlier steps in the Method, explain why the Section’s relevance to the Question.\nLet’s begin.\n",
"Question: {question}\nKey Terms:"
] |
2024-01-10 | Dr-Hutchinson/nicolay | nicolay_1.py | from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.prompts import FewShotPromptTemplate
from langchain.chains import LLMChain
from langchain import OpenAI, SerpAPIWrapper, Wikipedia
from langchain.agents import initialize_agent, Tool
from langchain.agents.react.base import DocstoreExplorer
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
import openai
from openai.embeddings_utils import get_embedding, cosine_similarity
import csv
from datetime import datetime as dt
import pandas as pd
import numpy as np
import os
import re
import streamlit as st
import pygsheets
from google.oauth2 import service_account
import ssl
import base64
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_info(
st.secrets["gcp_service_account"], scopes = scope)
gc = pygsheets.authorize(custom_credentials=credentials)
st.set_page_config(
page_title="Nicolay: Exploring the Speeches of Abraham Lincoln with AI (version 0.1)",
layout='wide',
page_icon='🔍'
)
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
openai.api_key = os.getenv("OPENAI_API_KEY")
#os.environ["SERPAPI_API_KEY"] = st.secrets["serpapi_api_key"]
st.title("Nicolay: Exploring the Speeches of Abraham Lincoln with AI")
st.write("This application uses OpenAI's GPT AI models to answer questions about the collected speeches of Abraham Lincoln. Choose one of the options below, and pose a question about Lincoln's speeches.")
semantic_search = "Semantic Search: Enter a question, and receive sections of Lincoln's speeches that are the most closely related semantically."
#ask_a_paragraph = "Ask a Paragraph: Internet Search. Select a Section from the text, and then pose a question. GPT-3 will search the internet to answer your question."
#ask_wiki = "Ask a Paragraph: Wikipedia. Select a Section from the text, and then pose a question. GPT-3 will search Wikipedia to answer your question."
ask_a_source = "Ask Nicolay: Pose a question about Lincoln's speeches, and a GPT AI model will share answers drawn from the text. This process can take several minutes to complete."
search_method = st.radio("Choose a method:", (semantic_search, ask_a_source))
model_choice = st.selectbox("Choose an AI model for Ask Nicolay:", ('ChatGPT', 'GPT-4'), index=1)
#section_number = st.number_input('Select a section number if you have selected Ask a Paragraph. You can find the section numbers to the bottom left, or through a semantic search.', step=1)
submission_text = st.text_area("Enter your question below. ")
submit_button_1 = st.button(label='Click here to submit your question.')
if submit_button_1:
st.subheader("Nicolay's analysis is underway. It can take several minutes for every step of the process to be completed. Thank you for your patience. Nicolay's progress will be documented below.")
os.environ["OPENAI_API_KEY"] = st.secrets["openai_api_key"]
if model_choice == 'ChatGPT':
model_select = 'gpt-3.5-turbo'
else:
model_select = 'gpt-4'
# semantic search via text embeddings with OpenAI Ada embedding model
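# The CSV is expected to contain a pre-computed "embedding" column stored as a Python-literal
# list per row; applying eval and np.array below restores each row to a numeric vector.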
datafile_path = "lincoln_index_embedded.csv"
df = pd.read_csv(datafile_path)
df["embedding"] = df.embedding.apply(eval).apply(np.array)
def embeddings_search():
# try this
datafile_path = "lincoln_index_embedded.csv"
df = pd.read_csv(datafile_path)
df["embedding"] = df.embedding.apply(eval).apply(np.array)
def search_text(df, product_description, n=3, pprint=True):
product_embedding = get_embedding(
product_description,
engine="text-embedding-ada-002"
)
df["similarities"] = df.embedding.apply(lambda x: cosine_similarity(x, product_embedding))
# Select the first three rows of the sorted DataFrame
top_three = df.sort_values("similarities", ascending=False).head(3)
# If `pprint` is True, print the output
#if pprint:
#for _, row in top_three.iterrows():
#print(row["combined"])
#print()
# Return the DataFrame with the added similarity values
return top_three
# Call the search_text() function and store the return value in a variable
results_df = search_text(df, submission_text, n=3)
# Reset the index and create a new column "index"
results_df = results_df.reset_index()
# Access the values in the "similarities" and "combined" columns
similarity1 = results_df.iloc[0]["similarities"]
combined1 = str(results_df.iloc[0]["combined"])
similarity2 = results_df.iloc[1]["similarities"]
combined2 = str(results_df.iloc[1]["combined"])
similarity3 = results_df.iloc[2]["similarities"]
combined3 = str(results_df.iloc[2]["combined"])
num_rows = results_df.shape[0]
# Iterate through the rows of the dataframe
for i in range(num_rows):
# Get the current row
row = results_df.iloc[i]
# working code - don't DELETE
with st.expander(label="Text Section " + str(i+1) + ":", expanded=True):
# Display each cell in the row as a separate block of text
st.markdown("**Question:**")
st.write(submission_text)
st.markdown("**Below is a section of the text along with its semantic similarity score. It is one of the three highest scoring sections in the text.**")
st.write(row['similarities'])
#combined_text = row['combined']
#combined_text = combined_text.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
#text_lines = combined_text.split('\n\n')
#for line in text_lines:
#st.markdown(line.replace('\n', '<br>')) # Replace '\n' with '<br>' for line breaks in markdown
# end working code - don't DELETE
combined_text = row['combined']
combined_text = combined_text.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
# Split the combined_text into sections
text_num, source, summary, keywords, full_text = combined_text.split('\n\n', 4)
# Remove the repeated labels from the values
text_num = text_num.replace("Text #:", "").strip()
source = source.replace("Source:", "").strip()
summary = summary.replace("Summary:", "").strip()
keywords = keywords.replace("Keywords:", "").strip()
full_text = full_text.replace("Full Text:", "").strip()
# Format each section with bold labels
formatted_text_num = "**Text #:** {}".format(text_num)
formatted_source = "**Source:** {}".format(source)
formatted_summary = "**Summary:** {}".format(summary)
formatted_keywords = "**Keywords:** {}".format(keywords)
# Display the formatted sections
st.markdown(formatted_text_num)
st.markdown(formatted_source)
st.markdown(formatted_summary)
st.markdown(formatted_keywords)
# Display the 'Full_Text' section with proper line breaks
st.markdown("**Full Text:**")
text_lines = full_text.split('\n')
for line in text_lines:
st.markdown(line.replace('\n', '<br>'))
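# ask_nicolay runs a three-step pipeline: (1) embedding-based semantic search for the three most
# similar text sections, (2) an LLM relevance check on each of those sections, and (3) an LLM-generated
# answer to the user question with supporting quotations drawn from the relevant sections.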
def ask_nicolay():
def search_text_2(df, product_description, n=3, pprint=True):
product_embedding = get_embedding(
product_description,
engine="text-embedding-ada-002"
)
df["similarities"] = df.embedding.apply(lambda x: cosine_similarity(x, product_embedding))
# Select the first three rows of the sorted DataFrame
top_three = df.sort_values("similarities", ascending=False).head(3)
# If `pprint` is True, print the output
#if pprint:
#for _, row in top_three.iterrows():
#print(row["combined"])
#print()
# Return the DataFrame with the added similarity values
return top_three
# Q&A doc prompt with langchain with prompts for determining relevance and extracting quotations.
results_df = search_text_2(df, submission_text, n=3)
# Reset the index and create a new column "index"
results_df = results_df.reset_index()
# Access the values in the "similarities" and "combined" columns
similarity1 = results_df.iloc[0]["similarities"]
combined1 = str(results_df.iloc[0]["combined"])
similarity2 = results_df.iloc[1]["similarities"]
combined2 = str(results_df.iloc[1]["combined"])
similarity3 = results_df.iloc[2]["similarities"]
combined3 = str(results_df.iloc[2]["combined"])
num_rows = results_df.shape[0]
st.markdown("**Step 1 complete - identified the most semantically similar text sections.**")
# Iterate through the rows of the dataframe
for i in range(num_rows):
# Get the current row
row = results_df.iloc[i]
# working code - don't DELETE
with st.expander(label="Text Section " + str(i+1) + ":", expanded=False):
# Display each cell in the row as a separate block of text
st.markdown("**Question:**")
st.write(submission_text)
st.markdown("**Below is a section of the text along with its semantic similarity score. It is one of the three highest scoring sections in the text.**")
st.write(row['similarities'])
#combined_text = row['combined']
#combined_text = combined_text.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
#text_lines = combined_text.split('\n\n')
#for line in text_lines:
#st.markdown(line.replace('\n', '<br>')) # Replace '\n' with '<br>' for line breaks in markdown
# end working code - don't DELETE
combined_text = row['combined']
combined_text = combined_text.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
# Split the combined_text into sections
text_num, source, summary, keywords, full_text = combined_text.split('\n\n', 4)
# Remove the repeated labels from the values
text_num = text_num.replace("Text #:", "").strip()
source = source.replace("Source:", "").strip()
summary = summary.replace("Summary:", "").strip()
keywords = keywords.replace("Keywords:", "").strip()
full_text = full_text.replace("Full Text:", "").strip()
# Format each section with bold labels
formatted_text_num = "**Text #:** {}".format(text_num)
formatted_source = "**Source:** {}".format(source)
formatted_summary = "**Summary:** {}".format(summary)
formatted_keywords = "**Keywords:** {}".format(keywords)
# Display the formatted sections
st.markdown(formatted_text_num)
st.markdown(formatted_source)
st.markdown(formatted_summary)
st.markdown(formatted_keywords)
# Display the 'Full_Text' section with proper line breaks
st.markdown("**Full Text:**")
text_lines = full_text.split('\n')
for line in text_lines:
st.markdown(line.replace('\n', '<br>'))
#st.write("Step 1 complete - identified the most semantically similar text sections.")
#st.dataframe(results_df)
st.markdown("**Next step - relevancy check.**")
## k-shot prompts for relevance
currency_question = """2. Section:\n\nText #: 58:\n\nSource: Second Annual Message. December 1, 1862\n\nSummary: In his Second Annual Message, Abraham Lincoln discusses the execution of the new commercial treaty between the United States and the Sultan of Turkey, as well as commercial and consular treaties with Liberia and Haiti. He describes the favorable relations maintained with European and other foreign states and the improved relations with neighboring countries in the Americas. Lincoln also addresses the financial situation, noting the suspension of specie payments and the introduction of United States notes as a temporary measure. He suggests the organization of banking associations under an act of Congress as a solution for providing public funds and a safe, uniform currency. Furthermore, he mentions the importance of developing the mineral resources in the Territories and the potential benefits of an Atlantic telegraph connecting the United States with Europe.\n\nKeywords: Abraham Lincoln, Second Annual Message, December 1 1862, commercial treaty, Sultan of Turkey, Liberia, Haiti, foreign relations, Americas, finances, suspension of specie payments, United States notes, banking associations, mineral resources, Territories, Atlantic telegraph.\n\nFull Text:\n\n"The new commercial treaty between the United States and the Sultan of Turkey has been carried into execution.\n\nA commercial and consular treaty has been negotiated, subject to the Senate's consent, with Liberia; and a similar negotiation is now pending with the republic of Hayti. A considerable improvement of the national commerce is expected to result from these measures.\n\nOur relations with Great Britain, France, Spain, Portugal, Russia, Prussia, Denmark, Sweden, Austria, the Netherlands, Italy, Rome, and the other European states, remain undisturbed. Very favorable relations also continue to be maintained with Turkey, Morocco, China and Japan.\n\nDuring the last year there has not only been no change of our previous relations with the independent states of our own continent, but, more friendly sentiments than have heretofore existed, are believed to be entertained by these neighbors, whose safety and progress, are so intimately connected with our own. This statement especially applies to Mexico, Nicaragua, Costa Rica, Honduras, Peru, and Chile.\n\nThe commission under the convention with the republic of New Granada closed its session, without having audited and passed upon, all the claims which were submitted to it. A proposition is pending to revive the convention, that it may be able to do more complete justice. The joint commission between the United States and the republic of Costa Rica has completed its labors and submitted its report.\n\nI have favored the project for connecting the United States with Europe by an Atlantic telegraph, and a similar project to extend the telegraph from San Francisco, to connect by a Pacific telegraph with the line which is being extended across the Russian empire.\n\nThe Territories of the United States, with unimportant exceptions, have remained undisturbed by the civil war, and they are exhibiting such evidence of prosperity as justifies an expectation that some of them will soon be in a condition to be organized as States, and be constitutionally admitted into the federal Union.\n\nThe immense mineral resources of some of those Territories ought to be developed as rapidly as possible. 
Every step in that direction would have a tendency to improve the revenues of the government, and diminish the burdens of the people. It is worthy of your serious consideration whether some extraordinary measures to promote that end cannot be adopted. The means which suggests itself as most likely to be effective, is a scientific exploration of the mineral regions in those Territories, with a view to the publication of its results at home and in foreign countries---results which cannot fail to be auspicious.\n\nThe condition of the finances will claim your most diligent consideration. The vast expenditures incident to the military and naval operations required for the suppression of the rebellion, have hitherto been met with a promptitude, and certainty, unusual in similar circumstances, and the public credit has been fully maintained. The continuance of the war, however, and the increased disbursements made necessary by the augmented forces now in the field, demand your best reflections as to the best modes of providing the necessary revenue, without injury to business and with the least possible burdens upon labor.\nThe suspension of specie payments by the banks, soon after the commencement of your last session, made large issues of United States notes unavoidable. In no other way could the payment of the troops, and the satisfaction of other just demands, be so economically, or so well provided for. The judicious legislation of Congress, securing the receivability of these notes for loans and internal duties, and making them a legal tender for other debts , has made them an universal currency; and has satisfied, partially, at least, and for the time, the long felt want of an uniform circulating medium, saving thereby to the people, immense sums in discounts and exchanges.\n\nA return to specie payments, however, at the earliest period compatible with due regard to all interests concerned, should ever be kept in view. Fluctuations in the value of currency are always injurious, and to reduce these fluctuations to the lowest possible point will always be a leading purpose in wise legislation. Convertibility, prompt and certain convertibility into coin, is generally acknowledged to be the best and surest safeguard against them; and it is extremely doubtful whether a circulation of United States notes, payable in coin, and sufficiently large for the wants of the people, can be permanently, usefully and safely maintained.\n\nIs there, then, any other mode in which the necessary provision for the public wants can be made, and the great advantages of a safe and uniform currency secured?\n\nI know of none which promises so certain results, and is, at the same time, so unobjectionable, as the organization of banking associations, under a general act of Congress, well guarded in its provisions.\n\nTo such associations the government might furnish circulating notes, on the security of United States bonds deposited in the treasury. These notes, prepared under the supervision of proper officers, being uniform in appearance and security, and convertible always into coin, would at once protect labor against the evils of a vicious currency, and facilitate commerce by cheap and safe exchanges.\n\nA moderate reservation from the interest on the bonds would compensate the United States for the preparation and distribution of the notes and a general supervision of the system, and would lighten the burden of that part of the public debt employed as securities. 
The public credit, moreover, would be greatly improved, and the negotiation of new loans greatly facilitated by the steady market demand for government bonds which the adoption of the proposed system would create."\n\n3. Key Words: finances, suspension of specie payments, United States notes, banking associations, uniform currency\n\nRelevance Determination: Section_58: Relevant\n\n4. Relevance Explanation: The section is relevant to the question about currency reform because it addresses the financial situation, the suspension of specie payments, and the introduction of United States notes. Lincoln suggests the organization of banking associations under an act of Congress as a solution for providing public funds and a safe, uniform currency."""
japan_question = """2. Section:\n\nText #: 72\n\nSource: Fourth Annual Message. December 6, 1854.\n\nSummary: In Abraham Lincoln's Fourth Annual Message, he expresses gratitude for good health and abundant harvests. He discusses the United States' foreign relations, including maintaining neutrality in Mexico's civil war and involvement in various projects such as a river survey in Central America and the overland telegraph between America and Europe. He mentions the friendly relations with South American states, the ongoing civil war in the Spanish part of San Domingo, and the improvement in relations with Liberia. Lincoln also discusses the situation in China, where rebellion has been suppressed, and Japan, where progress has been made in performing treaty stipulations.\n\nKey Words: Fourth Annual Message, December 6, 1864, Abraham Lincoln, foreign relations, Mexico, civil war, Central America, river survey, overland telegraph, South American states, San Domingo, Liberia, China, Japan, treaty stipulations.\n\nFull Text:\n\n"Fellow-citizens of the Senate December 6, 1864 and House of Representatives:\n\nAgain the blessings of health and abundant harvests claim our profoundest gratitude to Almighty God.\n\nThe condition of our foreign affairs is reasonably satisfactory. \n\nMexico continues to be a theatre of civil war. While our political relations with that country have undergone no change, we have, at the same time, strictly maintained neutrality between the belligerents. \n\nAt the request of the states of Costa Rica and Nicaragua, a competent engineer has been authorized to make a survey of the river San Juan and the port of San Juan. It is a source of much satisfaction that the difficulties which for a moment excited some political apprehensions, and caused a closing of the inter-oceanic transit route, have been amicably adjusted, and that there is a good prospect that the route will soon be reopened with an increase of capacity and adaptation. We could not exaggerate either the commercial or the political importance of that great improvement.\n\nIt would be doing injustice to an important South American state not to acknowledge the directness, frankness, and cordiality with which the United States of Colombia have entered into intimate relations with this government. A claims convention has been constituted to complete the unfinished work of the one which closed its session in 1861. .\n\nThe new liberal constitution of Venezuela having gone into effect with the universal acquiescence of the people, the government under it has been recognized, and diplomatic intercourse with it has opened in a cordial and friendly spirit. The long-deferred Aves Island claim has been satisfactorily paid and discharged. \n\nMutual payments have been made of the claims awarded by the late joint commission for the settlement of claims between the United States and Peru. An earnest and cordial friendship continues to exist between the two countries, and such efforts as were\n\nin my power have been used to remove misunderstanding and avert a threatened war between Peru and Spain. 
\n\nOur relations are of the most friendly nature with Chile, the Argentine Republic, Bolivia, Costa Rica, Paraguay, San Salvador, and Hayti.\n\nDuring the past year no differences of any kind have arisen with any of those republics, and, on the other hand, their sympathies with the United States are constantly expressed with cordiality and earnestness.\n\nThe claim arising from the seizure of the cargo of the brig Macedonian in 1821 has been paid in full by the government of Chile. \n\nCivil war continues in the Spanish part of San Domingo, apparently without prospect of an early close.\n\nOfficial correspondence has been freely opened with Liberia, and it gives us a pleasing view of social and political progress in that Republic. It may be expected to derive new vigor from American influence, improved by the rapid disappearance of slavery in the United States.\n\nI solicit your authority to furnish to the republic a gunboat at moderate cost, to be reimbursed to the United States by instalments. Such a vessel is needed for the safety of that state against the native African races; and in Liberian hands it would be more effective in arresting the African slave trade than a squadron in our own hands. The possession of the least organized naval force would stimulate a generous ambition in the republic, and the confidence which we should manifest by furnishing it would win forbearance and favor towards the colony from all civilized nations.\n\nThe proposed overland telegraph between America and Europe, by the way of Behring's Straits and Asiatic Russia, which was sanctioned by Congress at the last session, has been undertaken, under very favorable circumstances, by an association of American citizens, with the cordial good-will and support as well of this government as of those of Great Britain and Russia. Assurances have been received from most of the South American States of their high appreciation of the enterprise, and their readiness to co-operate in constructing lines tributary to that world-encircling communication. I learn, with much satisfaction, that the noble design of a telegraphic communication between the eastern coast of America and Great Britain has been renewed with full expectation of its early accomplishment.\n\nThus it is hoped that with the return of domestic peace the country will be able to resume with energy and advantage its former high career of commerce and civilization.\n\nOur very popular and estimable representative in Egypt died in April last. An unpleasant altercation which arose between the temporary incumbent of the office and the government of the Pacha resulted in a suspension of intercourse. The evil was promptly corrected on the arrival of the successor in the consulate, and our relations with Egypt, as well as our relations with the Barbary powers, are entirely satisfactory.\n\nThe rebellion which has so long been flagrant in China, has at last been suppressed, with the co-operating good offices of this government, and of the other western commercial states. The judicial consular establishment there has become very difficult and onerous, and it will need legislative revision to adapt it to the extension of our commerce, and to the more intimate intercourse which has been instituted with the government and people of that vast empire. 
China seems to be accepting with hearty good-will the conventional laws which regulate commercial and social intercourse among the western nations.\n\nOwing to the peculiar situation of Japan, and the anomalous form of its government, the action of that empire in performing treaty stipulations is inconstant and capricious. Nevertheless, good progress has been effected by the western powers, moving with enlightened concert. Our own pecuniary claims have been allowed, or put in course of settlement, and the inland sea has been reopened to commerce. There is reason also to believe that these proceedings have increased rather than diminished the friendship of Japan towards the United States."\n\n3.Key Words: Japan, treaty stipulations, foreign relations, Fourth Annual Message.\n\nRelevance Determination: Section_72: Relevant\n\Relevance Explanation: In this section of Lincoln's Fourth Annual Message, he discusses the situation in Japan. He mentions that progress has been made in performing treaty stipulations and that the inland sea has been reopened to commerce. He also states that the proceedings have likely increased the friendship of Japan towards the United States."""
railroad_question = """2. Section: \n\nText #: 71\n\nSource: Third Annual Message. December 8, 1863.\n\n"But why any proclamation now upon this subject? This question is beset with the conflicting views that the step might be delayed too long or be taken too soon. In some States the elements for resumption seem ready for action, but remain inactive, apparently for want of a rallying point---a plan of action. Why shall A adopt the plan of B, rather than B that of A? And if A and B should agree, how can they know but that the general government here will reject their plan? By the proclamation a plan is presented which may be accepted by them as a rallying point, and which they are assured in advance will not be rejected here. This may bring them to act sooner than they otherwise would.\n\nThe objections to a premature presentation of a plan by the national Executive consists in the danger of committals on points which could be more safely left to further developments. Care has been taken to so shape the document as to avoid embarrassments from this source. Saying that, on certain terms, certain classes will be pardoned, with rights restored, it is not said that other classes, or other terms, will never be included. Saying that reconstruction will be accepted if presented in a specified way, it is not said it will never be accepted in any other way.\n\nThe movements, by State action, for emancipation in several of the States, not included in the emancipation proclamation, are matters of profound gratulation. And while I do not repeat in detail what I have hertofore so earnestly urged upon this subject, my general views and feelings remain unchanged; and I trust that Congress will omit no fair opportunity of aiding these important steps to a great consummation.\n\nIn the midst of other cares, however important, we must not lose sight of the fact that the war power is still our main reliance. To that power alone can we look, yet for a time, to give confidence to the people in the contested regions, that the insurgent power will not again overrun them. Until that confidence shall be established, little can be done anywhere for what is called reconstruction. Hence our chiefest care must still be directed to the army and navy, who have thus far borne their harder part so nobly and well. And it may be esteemed fortunate that in giving the greatest efficiency to these indispensable arms, we do also honorably recognize the gallant men, from commander to sentinel, who compose them, and to whom, more than to others, the world must stand indebted for the home of freedom disenthralled, regenerated, enlarged, and perpetuated."\n\nSummary: In this portion of the Third Annual Message, President Lincoln addresses the importance of presenting a plan for the resumption of national authority within States where it has been suspended. He argues that by providing a rallying point, states can act sooner to initiate reconstruction. He also expresses his satisfaction with the movements towards emancipation in states not covered by the Emancipation Proclamation and urges Congress to support these efforts. Lincoln emphasizes that the war power, represented by the army and navy, is still the primary means to establish confidence in contested regions and prevent the insurgent power from overrunning them. He acknowledges the essential role of the military in securing freedom and promoting reconstruction.\n\n3. 
Key Words: None directly related to railroad construction.\n\nRelevance Determination: Section_71: Irrelevant\n\n5. Relevance Explanation: The section does not address the topic of railroad construction. Instead, it focuses on the resumption of national authority within states, emancipation movements in states not covered by the Emancipation Proclamation, and the importance of the army and navy in securing freedom and promoting reconstruction."""
examples = [
{"question": "1. Question: What points does Lincoln make about currency reform?", "output": currency_question},
{"question": "1. Question Does Lincoln discuss Japan, and if so, what is the context of this discussion?", "output": japan_question},
{"question": "1. Question: What points does Lincoln make about railroad construction?", "output": railroad_question}
]
prompt_instructions = "You are an AI expert on presidential history with a specialty on the life and times of Abraham Lincoln. In this exercise you are given a user supplied question, a Section of the Text, and a Method for determining the Section’s relevance to the Question. Your objective is to determine whether that Section of the text is directly and specifically relevant to the user question. You will use the Method below to fulfill this objective, taking each step by step.\n\nHere is your Method.\nMethod: Go step by step in answering the question.\n1. Question: You will be provided with a user question.\n2. Section: You will be given a section of the text from a speech by Abraham Lincoln, accompanied by a summary of that section and keywords associated with the text.\n3. Key Words: Identify key words in the Section that are specifically and directly related to the Question. Such key words could include specific locations, events, or people mentioned in the Section.\n4. Relevance Determination: Based on your review of the earlier steps in the Method, determine whether the section is relevant, and gauge your confidence (high, medium, low, or none) in this determination. High determination is specifically and directly related to the Question. If the section is relevant and ranked high, write ‘'Section_x: Relevant'. Otherwise, if the section is not relevant and the determination is less than high, write 'Section_x: Irrelevant'.\n5. Relevance Explanation: Based on your review in the earlier steps in the Method, explain why the Section’s relevance to the Question.\nLet’s begin.\n"
example_prompt = SystemMessagePromptTemplate.from_template(prompt_instructions)
human_message_prompt = HumanMessagePromptTemplate.from_template("Question: {question}\nKey Terms:")
chat_prompt = ChatPromptTemplate.from_messages([example_prompt, human_message_prompt])
chat = ChatOpenAI(temperature=0, model_name=model_select)
chain = LLMChain(llm=chat, prompt=chat_prompt)
r_check_1 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined1))
#print(r_check_1)
r_check_2 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined2))
#print(r_check_2)
r_check_3 = chain.run(question=str(submission_text + "\n2. Section:\n " + combined3))
#print(r_check_3)
st.markdown("**Step 2 complete: Nicolay has made relevancy checks on the text sections.**")
# combined function for combining sections + outputs, and then filtering via regex for relevant sections
combined_df = pd.DataFrame(columns=['output', 'r_check'])
combined_df['output'] = [combined1, combined2, combined3]
combined_df['r_check'] = [r_check_1, r_check_2, r_check_3]
for i in range(num_rows):
# Get the current row
row = combined_df.iloc[i]
# working code - don't DELETE
with st.expander(label="Relevance Check " + str(i+1) + ":", expanded=False):
# Display each cell in the row as a separate block of text
st.markdown("**1. Question:**")
st.write(submission_text)
#st.markdown("**2. Relevance Check:")
#st.markdown(combined_text_1)
st.markdown("**Text Information:**")
#st.write(row['similarities'])
#combined_text = row['combined']
#combined_text = combined_text.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
#text_lines = combined_text.split('\n\n')
#for line in text_lines:
#st.markdown(line.replace('\n', '<br>')) # Replace '\n' with '<br>' for line breaks in markdown
# end working code - don't DELETE
#combined_text_0 = row['output']
#combined_text_0 = combined_text_0.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
# Split the combined_text into sections
#text_num, source, summary, keywords, full_text = combined_text_0.split('\n\n', 4)
# Remove the repeated labels from the values
#text_num = text_num.replace("Text #:", "").strip()
#source = source.replace("Source:", "").strip()
#summary = summary.replace("Summary:", "").strip()
#keywords = keywords.replace("Keywords:", "").strip()
#full_text = full_text.replace("Full Text:", "").strip()
# Format each section with bold labels
#formatted_text_num = "**Text #:** {}".format(text_num)
#formatted_source = "**Source:** {}".format(source)
#formatted_summary = "**Summary:** {}".format(summary)
#formatted_keywords = "**Keywords:** {}".format(keywords)
# Display the formatted sections
#st.markdown(formatted_text_num)
#st.markdown(formatted_source)
#st.markdown(formatted_summary)
#st.markdown(formatted_keywords)
# Display the 'Full_Text' section with proper line breaks
#st.markdown("**Full Text:**")
#text_lines = full_text.split('\n')
#for line in text_lines:
#st.markdown(line.replace('\n', '<br>'))
#combined_text_1 = row['r_check']
#combined_text_1 = combined_text_1.replace('\\n\\n', '\n\n')
# Split the combined_text into sections
#relevance_determination, relevance_explanation = combined_text_1.split('\n\n', 1)
# Remove the repeated labels from the values
#relevance_determination = relevance_determination.replace("3. Relevance Determination: ", "").strip()
#relevance_explanation = relevance_explanation.replace("4. Relevance Explanation: ", "").strip()
#formatted_relevance_determination = "**3. Relevance Determination:** {}".format(relevance_determination)
#formatted_relevance_explanation = "**4. Relevance Explanation:** {}".format(relevance_explanation)
#st.markdown(formatted_relevance_determination)
#st.markdown(formatted_relevance_explanation)
# working code - don't DELETE
# begin snippet
combined_text_0 = row['output']
combined_text_0 = combined_text_0.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
text_num, source, summary, keywords, full_text = combined_text_0.split('\n\n', 4)
text_num = text_num.replace("Text #:", "").strip()
source = source.replace("Source:", "").strip()
summary = summary.replace("Summary:", "").strip()
keywords = keywords.replace("Keywords:", "").strip()
full_text = full_text.replace("Full Text:", "").strip()
formatted_text_num = "**Text #:** {}".format(text_num)
formatted_source = "**Source:** {}".format(source)
formatted_summary = "**Summary:** {}".format(summary)
formatted_keywords = "**Keywords:** {}".format(keywords)
st.markdown(formatted_text_num)
st.markdown(formatted_source)
st.markdown(formatted_summary)
st.markdown(formatted_keywords)
st.markdown("**Full Text:**")
text_lines = full_text.split('\n')
for line in text_lines:
st.markdown(line.replace('\n', '<br>'))
combined_text_1 = row['r_check']
combined_text_1 = combined_text_1.replace('\\n\\n', '\n\n')
# Try to split the text into three sections
#split_text = combined_text_1.split('\n\n', 2)
# Check if the split was successful
# existing code
# begin snippet
#if len(split_text) == 3:
# _, relevant_keywords, relevance_explanation = split_text
#else:
# If the split wasn't successful, try splitting the text differently
# split_text = combined_text_1.split('\n\n', 1)
# if len(split_text) == 2:
# relevant_keywords, relevance_explanation = split_text
# else:
# If splitting is still unsuccessful, set empty values to avoid errors
# relevant_keywords, relevance_explanation = "", ""
#relevant_keywords = relevant_keywords.replace("3. Key Words: ", "").strip()
#relevance_explanation = relevance_explanation.replace("4. Relevance Explanation: ", "").strip()
#formatted_relevant_keywords = "**3. Key Words:** {}".format(relevant_keywords)
# Extract relevance determination from the beginning of the relevance explanation
#relevance_determination = relevance_explanation.split('\n', 1)[0].strip().replace("4. Relevance Determination: ", "")
# Remove relevance determination from the relevance explanation
#relevance_explanation = relevance_explanation.replace(relevance_determination, "").strip()
#formatted_relevance_determination = "**4. Relevance Determination:** {}".format(relevance_determination)
#formatted_relevance_explanation = "**5. Relevance Explanation:** {}".format(relevance_explanation)
#st.markdown(formatted_relevant_keywords)
#st.markdown(formatted_relevance_determination)
#st.markdown(formatted_relevance_explanation)
st.markdown(combined_text_1)
# end snippet
#st.dataframe(combined_df)
st.markdown("**Next step: Answer the user question with supporting quotations from the relevant texts.**")
# Use the re.IGNORECASE flag to make the regular expression case-insensitive
#regex = re.compile(r'Relevance Explanation.*?(relevant)', re.IGNORECASE | re.DOTALL)
# Replace the original regex pattern with the updated one
#regex = re.compile(r'Relevance Determination.*?:\s*(Relevant)', re.IGNORECASE | re.DOTALL)
# Modify the regex pattern to include both "Relevance Determination" and "Relevance Explanation"
#regex = re.compile(r'Relevance (?:Determination|Explanation).*?:\s*(Relevant)', re.IGNORECASE | re.DOTALL)
# Modify the regex pattern to include the case when there is no "Relevance Determination" or "Relevance Explanation" string
#regex = re.compile(r'Section_.*?:\s*(Relevant)(?:\.|,).*?Relevance Explanation.*', re.IGNORECASE | re.DOTALL)
# Modify the regex pattern to include the optional "Relevance Determination" string followed by any characters and a newline character
# Modify the regex pattern to include the optional "Key Words:" string followed by any characters and a newline character
# Modify the regex pattern to accommodate multiple newline characters between "Relevance Determination:" and "Section_"
# Update the regex pattern to make the entire pattern case-insensitive
#regex = re.compile(r'(?i)Section_.*?:\s*(Relevant)(?:\s*\(.+?\))?(?:\.|,)', re.DOTALL)
# Apply the regex pattern to the 'r_check' column and store the results in a new 'mask' column
#combined_df['mask'] = combined_df['r_check'].str.contains(regex)
# Apply the regex pattern to the 'r_check' column using the str.match() function
#combined_df['mask'] = combined_df['r_check'].str.match(regex)
# Create a second mask to capture "this is relevant"
#combined_df['second_mask'] = combined_df['r_check'].str.contains(r'this section is relevant', flags=re.IGNORECASE)
# Combine the two masks using the bitwise OR operator (|) and store the result in the 'mask' column
#combined_df['mask'] = combined_df['mask'] | combined_df['second_mask']
# Filter the combined dataframe to include only rows where the 'mask' column is True
#relevant_df = combined_df.loc[combined_df['mask']].copy()
# Modified regex pattern
# Updated regex pattern
#regex = re.compile(r'(?i)(?:\d+\.?\s*)?Relevance Determination:\s*(?:\n|\r\n)?\s*Section_\s*\d+\s*[:=]\s*(?:\n|\r\n)?\s*(Relevant)(?:\s*\(.+?\))?(?:\.|,)?', re.DOTALL)
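# The active pattern below matches outputs like "Relevance Determination: Section_58: Relevant"
# (case-insensitive), tolerating an optional step number, an optional leading "High", intervening
# line breaks, and trailing punctuation or a parenthetical note.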
regex = re.compile(r'(?i)(?:\d+\.?\s*)?Relevance Determination:\s*(?:\n|\r\n)?\s*(High\s*)?Section_\s*\d+\s*[:=]\s*(?:\n|\r\n)?\s*(Relevant)(?:\s*\(.+?\))?(?:\.|,)?', re.DOTALL)
# Apply the regex pattern to the 'r_check' column using the str.contains() function
combined_df['mask'] = combined_df['r_check'].str.contains(regex)
# Create a second mask to capture "this section is relevant"
combined_df['second_mask'] = combined_df['r_check'].str.contains(r'this section is relevant', flags=re.IGNORECASE)
# Combine the two masks using the bitwise OR operator (|) and store the result in the 'mask' column
combined_df['mask'] = combined_df['mask'] | combined_df['second_mask']
# Filter the combined dataframe to include only rows where the 'mask' column is True
relevant_df = combined_df.loc[combined_df['mask']].copy()
# Check if there are any rows in the relevant_df dataframe
if relevant_df.empty:
# If there are no rows, print the desired message
st.write("No relevant sections identified.")
else:
# Otherwise, continue with the rest of the script
def combine_strings(row):
return row['output'] + '\nKey Terms\n' + row['r_check']
# Use the apply function to apply the combine_strings function to each row of the relevant_df dataframe
# and assign the result to the 'combined_string' column
relevant_df['combined_string'] = relevant_df.apply(combine_strings, axis=1)
final_sections = relevant_df['combined_string']
#final_sections.to_csv('final_sections.csv')
evidence_df = pd.DataFrame(final_sections)
evidence = '\n\n'.join(evidence_df['combined_string'])
#evidence_df.to_csv('evidence.csv')
#print(evidence)
# Filter the relevant_df dataframe to include only the 'output' column
output_df = relevant_df[['output']]
# Convert the dataframe to a dictionary
output_dict = output_df.to_dict('records')
# Extract the values from the dictionary using a list comprehension
output_values = [d['output'] for d in output_dict]
# Print the output values to see the results
#print(output_values)
# begin quotation identification and answer prompt
cherokee_question = "2. Text:\n\n\nText Number: 59\nSource: Second Annual Message. December 1, 1863\n\nThe Indian tribes upon our frontiers have, during the past year, manifested a spirit of insubordination, and, at several points, have engaged in open hostilities against the white settlements in their vicinity. The tribes occupying the Indian country south of Kansas, renounced their allegiance to the United States, and entered into treaties with the insurgents. Those who remained loyal to the United States were driven from the country. The chief of the Cherokees has visited this city for the purpose of restoring the former relations of the tribe with the United States. He alleges that they were constrained, by superior force, to enter into treaties with the insurgents, and that the United States neglected to furnish the protection which their treaty stipulations required.\n\n3. Compose Initial Answer: Lincoln regarded the Cherokee as a tribe that had been forced into renouncing their allegiance to the United States and entering into treaties with the Confederacy due to superior force and neglect on the part of the United States.\n\n4. Identify Supporting Quote: \"The chief of the Cherokees has visited this city for the purpose of restoring the former relations of the tribe with the United States. He alleges that they were constrained, by superior force, to enter into treaties with the insurgents, and that the United States neglected to furnish the protection which their treaty stipulations required.\" (Second Annual Message. December 1, 1863. (Text 59)\n\n5. Combined Answer with Supporting Quote: Lincoln discusses the Cherokee in his Second Annual Message of December 1, 1863. Lincoln notes notes the visit of the Cherokee chief to Washington D.C. “for the purpose of restoring the former relations of the tribe with the United States.” The Cherokee were “constrained, by superior force, to enter into treaties with the Confederacy.” Furthermore, the chief alleged “that the United States neglected to furnish the protection which their treaty stipulations required.” (Second Annual Message. December 1, 1863. Text 59)\n",
japan_question = "2. Text\n\nText Number#: 72: \nSource: Fourth Annual Message. December 6, 1864.\n\nOwing to the peculiar situation of Japan, and the anomalous form of its government, the action of that empire in performing treaty stipulations is inconstant and capricious. Nevertheless, good progress has been effected by the western powers, moving with enlightened concert. Our own pecuniary claims have been allowed, or put in course of settlement, and the inland sea has been reopened to commerce. There is reason also to believe that these proceedings have increased rather than diminished the friendship of Japan towards the United States.\n\nCompose Initial Answer: Yes, Lincoln discusses Japan in his Fourth Annual Message of December 6, 1854, stating that the peculiar situation and anomalous form of government of Japan have made their actions in performing treaty stipulations inconsistent and capricious. However, he notes that progress has been made by the western powers in working together and that our own pecuniary claims have been allowed or put in settlement. Additionally, the inland sea has been reopened to commerce, and these proceedings have likely increased Japan's friendship towards the United States.\n\nIdentify Supporting Quote: \"Owing to the peculiar situation of Japan, and the anomalous form of its government, the action of that empire in performing treaty stipulations is inconstant and capricious... There is reason also to believe that these proceedings have increased rather than diminished the friendship of Japan towards the United States.\" (Fourth Annual Message, December 6, 1864. Text Number 72).\n\nCombined Answer with Supporting Quote: Yes, Lincoln discusses Japan in his Fourth Annual Message of December 6, 1864. Lincoln acknowledged that \"the action of [Japan] in performing treaty stipulations is inconstant and capricious\" due to their \"peculiar situation\" and \"anomalous form of government.\" However, he also noted that \"good progress has been effected by the western powers, moving with enlightened concert,\" as evidenced by the settlement of the United States' pecuniary claims and the reopening of the inland sea to commerce. Lincoln further suggested that these efforts had \"increased rather than diminished the friendship of Japan towards the United States.\" Thus, this message reflects Lincoln's recognition of the importance of international cooperation and diplomacy in navigating complex political and cultural landscapes such as that of Japan during the Late Tokugawa period. (Fourth Annual Message, December 6, 1864. Text Number 72).\n"
examples = [
{"question": "1. Question: How did Lincoln regard the Cherokee?", "output": cherokee_question},
{"question": "1. Question: Does Lincoln discuss Japan, and if so, what is the context of this discussion?", "output": japan_question}
]
prompt_instructions ="You are an AI question-answerer and quotation-selector. The focus of your expertise is interpreting the historic writings of Abraham Lincoln. In this exercise you will first be given a user question, a Section of a Lincoln writing, and a Method for answering the question and supporting it with an appropriate quotation from the Section. In following this Method you will complete each step by step until finished.\nHere is your Method.\nMethod: Go step by step in the question.\n1. Question: You will be provided with a user question.\n2. Text: You will be given a section from a Text written by Abraham Lincoln. The Text contains the Text Number, the Source of the Text, and the original prose by Lincoln. \n3. Compose Initial Answer: Based on the Question and information provided in the Text, compose a historically accurate Initial Answer to that Question. The Initial Answer should be incisive, brief, and well-written.\n4. Identify Supporting Quote: Based on the Answer, select a Supporting Quote from the Text that supports that Answer. Select the briefest and most relevant Supporting Quote possible. You can also use paraphrasing to further shorten the Supporting Quote. Provide a citation at the end of the Supporting Quote, in the following manner: (Source, Text Number).\n5. Combined Answer with Supporting Quote: Rewrite the Initial Answer to incorporate the Supporting Quote. This Combined Answer should be historically accurate, and demonstrating a writing style that is incisive, brief, and well-written. All Quotes used should be cited using the method above.\n\nLet’s begin.\n"
example_prompt = SystemMessagePromptTemplate.from_template(prompt_instructions)
human_message_prompt = HumanMessagePromptTemplate.from_template("Question: {question}\nKey Terms:")
chat_prompt = ChatPromptTemplate.from_messages([example_prompt, human_message_prompt])
chat = ChatOpenAI(temperature=0, model_name=model_select)
chain = LLMChain(llm=chat, prompt=chat_prompt)
# Create an empty list to store the final_analysis results
final_analysis_results = []
# Iterate over the output_values list
for output_value in output_values:
# Run the final_analysis step and store the result in a variable
final_analysis = chain.run(submission_text+output_value)
# Add the final_analysis result to the list
final_analysis_results.append(final_analysis)
# Create a Pandas dataframe from the output_values list
final_analysis_df = pd.DataFrame({'output_values': output_values, 'final_analysis': final_analysis_results})
# Save the dataframe to a CSV file
#final_analysis_df.to_csv('final_analysis.csv', index=False)
st.subheader("Nicolay's Final Analysis:")
st.markdown("**Step 3 complete: Here is Nicolay's analysis of Lincoln's speeches based on your question. Click on the boxes below to see the full outputs.**")
#st.dataframe(final_analysis_df)
#st.write('\n\n')
#for i in range(len(final_analysis_df)):
# Get the current row
#row = final_analysis_df.iloc[i]
for i in range(len(final_analysis_df)):
# Get the current row
row = final_analysis_df.iloc[i]
# working code - don't DELETE
with st.expander(label="Nicolay's Response: " + str(i+1) + ":", expanded=False):
# Display each cell in the row as a separate block of text
st.markdown("**1. Question:**")
st.write(submission_text)
st.write("**2. Answer:**")
combined_text_x = row['final_analysis']
combined_text_x = combined_text_x.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
# Find the index of "Combined Answer with Supporting Quote:" and display the text after it
start_index = combined_text_x.find("Combined Answer with Supporting Quote:") + len("Combined Answer with Supporting Quote:")
answer_text = combined_text_x[start_index:].strip()
st.markdown(answer_text)
# working code - don't DELETE
#with st.expander(label="Nicolay's Response: " + str(i) + ":", expanded=False):
# Display each cell in the row as a separate block of text
#st.markdown("**1. Question:**")
#st.write(submission_text)
#st.write("**2. Answer:**")
#combined_text_x = row['final_analysis']
#combinex_text_x = combined_text_x.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
#st.markdown(combined_text_x)
#st.markdown("**3. Text Information:**")
combined_text_0 = row['output_values']
combined_text_0 = combined_text_0.replace('\\n\\n', '\n\n') # Convert plain string to actual newline characters
text_num, source, summary, keywords, full_text = combined_text_0.split('\n\n', 4)
text_num = text_num.replace("Text #:", "").strip()
source = source.replace("Source:", "").strip()
summary = summary.replace("Summary:", "").strip()
keywords = keywords.replace("Keywords:", "").strip()
full_text = full_text.replace("Full Text:", "").strip()
formatted_text_num = "**Text #:** {}".format(text_num)
formatted_source = "**Source:** {}".format(source)
formatted_summary = "**Summary:** {}".format(summary)
formatted_keywords = "**Keywords:** {}".format(keywords)
st.markdown(formatted_text_num)
st.markdown(formatted_source)
st.markdown(formatted_summary)
st.markdown(formatted_keywords)
st.markdown("**Full Text:**")
text_lines = full_text.split('\n')
for line in text_lines:
st.markdown(line.replace('\n', '<br>'))
if search_method == semantic_search:
embeddings_search()
else:
ask_nicolay()
| [
"[PLACEHOLDER, PLACEHOLDER]",
"You are an AI question-answerer and quotation-selector. The focus of your expertise is interpreting the historic writings of Abraham Lincoln. In this exercise you will first be given a user question, a Section of a Lincoln writing, and a Method for answering the question and supporting it with an appropriate quotation from the Section. In following this Method you will complete each step by step until finished.\nHere is your Method.\nMethod: Go step by step in the question.\n1. Question: You will be provided with a user question.\n2. Text: You will be given a section from a Text written by Abraham Lincoln. The Text contains the Text Number, the Source of the Text, and the original prose by Lincoln. \n3. Compose Initial Answer: Based on the Question and information provided in the Text, compose a historically accurate Initial Answer to that Question. The Initial Answer should be incisive, brief, and well-written.\n4. Identify Supporting Quote: Based on the Answer, select a Supporting Quote from the Text that supports that Answer. Select the briefest and most relevant Supporting Quote possible. You can also use paraphrasing to further shorten the Supporting Quote. Provide a citation at the end of the Supporting Quote, in the following manner: (Source, Text Number).\n5. Combined Answer with Supporting Quote: Rewrite the Initial Answer to incorporate the Supporting Quote. This Combined Answer should be historically accurate, and demonstrating a writing style that is incisive, brief, and well-written. All Quotes used should be cited using the method above.\n\nLet’s begin.\n",
"You are an AI expert on presidential history with a specialty on the life and times of Abraham Lincoln. In this exercise you are given a user supplied question, a Section of the Text, and a Method for determining the Section’s relevance to the Question. Your objective is to determine whether that Section of the text is directly and specifically relevant to the user question. You will use the Method below to fulfill this objective, taking each step by step.\n\nHere is your Method.\nMethod: Go step by step in answering the question.\n1. Question: You will be provided with a user question.\n2. Section: You will be given a section of the text from a speech by Abraham Lincoln, accompanied by a summary of that section and keywords associated with the text.\n3. Key Words: Identify key words in the Section that are specifically and directly related to the Question. Such key words could include specific locations, events, or people mentioned in the Section.\n4. Relevance Determination: Based on your review of the earlier steps in the Method, determine whether the section is relevant, and gauge your confidence (high, medium, low, or none) in this determination. High determination is specifically and directly related to the Question. If the section is relevant and ranked high, write ‘'Section_x: Relevant'. Otherwise, if the section is not relevant and the determination is less than high, write 'Section_x: Irrelevant'.\n5. Relevance Explanation: Based on your review in the earlier steps in the Method, explain why the Section’s relevance to the Question.\nLet’s begin.\n",
"Question: {question}\nKey Terms:"
] |
2024-01-10 | maribol/langchain-agents | tools~scrape_website.py |
# Import necessary modules
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from langchain.tools import BaseTool
from bs4 import BeautifulSoup
from typing import Type
import requests
import json
import os
# Load environmental variables
load_dotenv()
# Get browserless API key from environmental variables
browserless_api_key = os.getenv("BROWSERLESS_API_KEY")
def scrape_website(objective: str, url: str):
"""Scrape appropriate data from website url."""
# Establish headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Establish data for the request
data = {"url": url}
data_json = json.dumps(data)
# Set post url and send post request
post_url = f"https://chrome.browserless.io/content?token={brwoserless_api_key}"
response = requests.post(post_url, headers=headers, data=data_json)
# Parse response, return text or error message
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
if len(text) > 10000:
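            # NOTE: `summary` is assumed to be a summarization helper defined elsewhere
            # in this project (it is not imported here); it condenses long pages with
            # respect to the given objective before returning them.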
output = summary(objective, text)
return output
else:
return text
else:
return (f"Could not scrape this website. Got status code {response.status_code}.")
class ScrapeWebsiteInput(BaseModel):
"""Define inputs schema for scrape_website function"""
objective: str = Field(description="The objective & task that users give to the agent")
url: str = Field(description="The url of the website to be scraped")
class ScrapeWebsiteTool(BaseTool):
"""Define a class for the 'scrape_website' tool"""
name = "scrape_website"
description = "Useful when you need to get data from a website url, passing both url and objective to the function; DO NOT make up any url, the url should only be from the search results"
args_schema: Type[BaseModel] = ScrapeWebsiteInput
def _run(self, objective: str, url: str):
"""Run the scrape website function when called."""
return scrape_website(objective, url)
def _arun(self, url: str):
"""Raise an error if _arun method is called as it is not implemented."""
raise NotImplementedError("This method has not been implemented.") | [
"Useful when you need to get data from a website url, passing both url and objective to the function; DO NOT make up any url, the url should only be from the search results"
] |
2024-01-10 | xunafay/discord-assistants | whisper.py | import openai
import argparse
def transcribe_audio_with_openai(audio_file):
client = openai.OpenAI()
with open(audio_file, "rb") as audio:
# Call the OpenAI API to transcribe the audio file
# OpenAI Python client library handles the file upload
transcript = client.audio.transcriptions.create(
model="whisper-1",
file=audio
)
return transcript.text
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', help='Path to the audio file')
args = parser.parse_args()
if args.file:
audio_file = args.file
else:
print("Please provide the path to the audio file using the --file flag.")
exit(1)
# Transcribe the audio file using OpenAI API
transcript = transcribe_audio_with_openai(audio_file)
# Print the transcribed audio
print(transcript)
| [] |
2024-01-10 | XuZhao0/Model-Selection-Reasoning | src~selection_date.py | import json
import os
import openai
import random
import time
from datetime import datetime
import argparse
import re
from tqdm import tqdm
from typing import Union
from prompts import date_prompt
from collections import OrderedDict
from tool import *
def get_user_assistant_messages(system_message: str, user_message: str, assistant_message: str):
'''
This function is used to convert the prompt into the message format used by OpenAI Chat API.
'''
messages = []
messages.append({"role": "system", "content": system_message})
split_user_messages = user_message.split('\n\n\n\n')
split_assistant_messages = assistant_message.split('\n\n\n\n')
for i in range(len(split_user_messages)):
question = split_user_messages[i]
answer = split_assistant_messages[i]
messages += [
{"role": "user", "content": f"{question}"},
{"role": "assistant", "content": f"{answer}"},
]
return messages
def get_cot_prompt(data: dict):
'''
This function is used to generate the prompt for CoT.
'''
system_message = date_prompt.TURBO_COT_SYSTEM
user_message = date_prompt.TURBO_COT_USER
assistant_message = date_prompt.TURBO_COT_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
question_message = data['question']
messages += [{"role": "user", "content": f"Question: {question_message}"}]
return messages
def get_pal_prompt(data: dict):
'''
This function is used to generate the prompt for PAL.
'''
system_message = date_prompt.TURBO_PAL_SYSTEM
user_message = date_prompt.TURBO_PAL_USER
assistant_message = date_prompt.TURBO_PAL_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
question_message = data['question']
messages += [{"role": "user",
"content": f"Question: {question_message}\n\n# solution in Python"}]
return messages
def get_select_prompt(data: dict, cot_solution: list, pal_solution: list):
'''
This function is used to generate the prompt for selection.
'''
system_message = date_prompt.TURBO_SELECT_SYSTEM
user_message = date_prompt.TURBO_SELECT_USER
assistant_message = date_prompt.TURBO_SELECT_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
try:
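        # Strip the docstring block (text between the first pair of triple quotes) out
        # of the PAL program before showing it to the selection model; fall back to the
        # raw program if the split fails.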
pal_generated_list = pal_solution[0].split('"""')
pal_generated = pal_generated_list[0].strip(
) + pal_generated_list[2]
except Exception as e:
pal_generated = pal_solution[0]
if cot_solution[0].startswith('Answer:'):
cot_generated = cot_solution[0]
else:
cot_generated = 'Answer:\n' + cot_solution[0]
user_message = f'''Math problem: {data['question'].strip()}
(A)
{cot_generated.strip()}
(B)
{pal_generated.strip()}
Which of the above two choices can correctly answer the math problem?'''
messages += [{"role": "user", "content": user_message}]
return messages
def query_cot(data: dict, key: str, cot_temperature: float, backbone: str):
'''
This function is used to query OpenAI for CoT solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the CoT solution
'''
query_message = get_cot_prompt(data)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
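        # Retry the API call with a 3-5 second random backoff; give up after 60 seconds.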
try:
cot_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=500,
stop='\n\n\n',
messages=query_message,
temperature=cot_temperature,
top_p=1.0,
n=1)
except Exception as e:
cot_solution = None
if cot_solution is not None:
completions.extend([choice['message']['content']
for choice in cot_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_pal(data: dict, key: str, pal_temperature: float, backbone: str):
'''
This function is used to query OpenAI for PAL solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
pal_temperature: the temperature used in PAL
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the PAL solution
'''
query_message = get_pal_prompt(data)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
try:
pal_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=500,
stop='\n\n\n',
messages=query_message,
temperature=pal_temperature,
top_p=1.0,
n=1)
except Exception as e:
pal_solution = None
if pal_solution is not None:
completions.extend([choice['message']['content']
for choice in pal_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_select(data: dict, key: str, cot_solution: list, pal_solution: list, backbone: str):
'''
This function is used to query OpenAI for selection solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_solution: a list containing the CoT solution
pal_solution: a list containing the PAL solution
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the selection solution
'''
selection_message = get_select_prompt(
data, cot_solution, pal_solution)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
try:
selection_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=200,
stop='\n\n',
messages=selection_message,
temperature=0.,
top_p=1.0,
n=1)
except Exception as e:
selection_solution = None
if selection_solution is not None:
completions.extend([choice['message']['content']
for choice in selection_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_date(data: dict, key: str, cot_temperature: float, pal_temperature: float, backbone: str):
'''
This function is used to query OpenAI for answers in date understanding tasks. It contains three steps:
1. Query CoT for solutions
2. Query PAL for solutions
3. Query model selection answers
Note that we only query selection answers when CoT and PAL answers are different. Otherwise, we directly use CoT or PAL answers.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT.
pal_temperature: the temperature used in PAL.
backbone: ChatGPT or GPT-4
Returns:
to_dump_data: a dict containing the question, answer, the final answer and other information
'''
cot_ans = None
pal_ans = None
selection_ans = None
final_ans = None
cot_solution = query_cot(
data, key, cot_temperature, backbone=backbone)
if cot_solution is None:
print('Time out')
return None
else:
cot_ans = extract_date_cot(cot_solution[0])
pal_solution = query_pal(
data, key, pal_temperature, backbone=backbone)
if pal_solution is None:
print('Time out')
return None
else:
pal_ans = execute_date_pal(pal_solution[0])
if cot_ans is not None and pal_ans is not None:
# ==== Only select when CoT and PAL are different ====
if cot_ans != pal_ans:
selection_ans = query_select(
data, key, cot_solution=cot_solution, pal_solution=pal_solution, backbone=backbone)
if selection_ans is None:
print('Time out')
return None
else:
selection_choice = extract_choice_turbo(selection_ans[0])
if selection_choice == '(A)':
final_ans = cot_ans
elif selection_choice == '(B)':
final_ans = pal_ans
else:
final_ans = cot_ans
elif cot_ans is not None and pal_ans is None:
final_ans = cot_ans
elif cot_ans is None and pal_ans is not None:
final_ans = pal_ans
else:
final_ans = None
# === dump data ===
to_dump_data = OrderedDict(
{'index': data['index'], 'question': data['question'], 'answer': data['answer'],
'final_ans': final_ans, 'cot_executed': cot_ans, 'pal_executed': pal_ans,
'cot_generated': cot_solution, 'pal_generated': pal_solution, 'choice_solution': selection_ans}
)
return to_dump_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=-1)
parser.add_argument('--backbone', type=str,
choices=['chatgpt', 'gpt4'], default='gpt4')
parser.add_argument('--cot_temperature', type=float, default=0.)
parser.add_argument('--pal_temperature', type=float, default=0.)
parser.add_argument('--output_dir', type=str, default='../output/')
parser.add_argument(
'--key', type=str, default='sk-', required=True)
args = parser.parse_args()
start_index = args.start
end_index = args.end
cot_temperature = args.cot_temperature
pal_temperature = args.pal_temperature
backbone = args.backbone
output_dir = args.output_dir
key = args.key
start_time_0 = time.time()
print('Current time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
dt_string = datetime.now().strftime("%m_%d_%H_%M")
dataset = jsonlines_load('../dataset/date_understanding.jsonl')
# === slice data based on start and end ===
total_num = len(dataset)
print('total data: ', total_num)
if end_index == -1:
end_index = total_num
if end_index > total_num:
end_index = total_num
tasks = dataset[start_index:end_index]
task_num = len(tasks)
print('Current total tasks: ', task_num)
unfinished_tasks = []
output_path = os.path.join(output_dir, f'{backbone}/')
if not os.path.exists(output_path):
os.makedirs(output_path)
save_path = os.path.join(output_path,
f'date_s{start_index}_e{end_index}_{dt_string}.jsonl')
# === dump data ===
progress_bar = tqdm(range(task_num))
for i in range(task_num):
task = tasks[i]
start_time = time.time()
while True:
try:
ans = query_date(
task, key=key, cot_temperature=cot_temperature,
pal_temperature=pal_temperature, backbone=backbone)
except Exception as e:
print(e)
ans = None
if ans is not None:
with open(save_path, "a+") as fout:
fout.write(json.dumps(ans)+'\n')
progress_bar.update(1)
break
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 120:
print('Time out')
                    print('Current Task: ', i)
                    unfinished_tasks.append(task)
                    break
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
end_time_0 = time.time()
print('Finish at time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
print(f'Time used: {end_time_0 - start_time_0} seconds')
if len(unfinished_tasks) > 0:
print('Unfinished tasks: ')
for task in unfinished_tasks:
print(task)
print('Done')
| [
"PLACEHOLDER",
"Question: PLACEHOLDER\n\n# solution in Python",
"Question: PLACEHOLDER"
] |
2024-01-10 | XuZhao0/Model-Selection-Reasoning | src~selection_math_codex.py | import json
import os
import openai
import random
import time
from datetime import datetime
import argparse
import re
from tqdm import tqdm
from typing import Union
from prompts import math_prompt
from collections import OrderedDict, Counter
from tool import *
def get_cot_prompt(data: dict):
'''
This function is used to generate the CoT prompt for math problem
'''
cot_prompt = math_prompt.CODEX_COT_PROMPT
inference_prompt = cot_prompt + f'''Q: {data['question'].strip()}
'''
return inference_prompt
def get_pal_prompt(data: dict):
'''
This function is used to generate the PAL prompt for math problem
'''
pal_prompt = math_prompt.CODEX_PAL_PROMPT
inference_prompt = pal_prompt + f'''Q: {data['question'].strip()}
# solution in Python:
'''
return inference_prompt
def get_select_prompt(data: dict, cot_solution: list, pal_solution: list):
'''
This function is used to generate the selection prompt for math problem
'''
selection_prompt = math_prompt.CODEX_SELECT_PROMPT
try:
pal_generated_list = pal_solution[0].split('"""')
pal_generated = pal_generated_list[0].strip(
) + pal_generated_list[2]
except Exception as e:
pal_generated = pal_solution[0]
cot_generated = cot_solution[0]
inference_prompt = selection_prompt + f'''Math Problem: {data['question'].strip()}
Question: Which of the following two choices can correctly answer the math problem?
(A)
{pal_generated.strip()}
(B)
{cot_generated.strip()}
Answer:'''
return inference_prompt
def query_cot(data: dict, key: str, cot_temperature: float):
'''
This function is used to query OpenAI for CoT solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
cot_temperature: temperature used for CoT
Returns:
completions: a list of CoT solutions
'''
cot_prompt = get_cot_prompt(data)
start_time = time.time()
completions = []
while True:
try:
cot_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=500,
stop='\n\n\n',
prompt=cot_prompt,
temperature=cot_temperature,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
cot_solution = None
if cot_solution is not None:
completions.extend([choice['text']
for choice in cot_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_pal(data: dict, key: str, pal_temperature: float):
'''
This function is used to query OpenAI for PAL solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
pal_temperature: temperature used for PAL
Returns:
completions: a list of PAL solutions
'''
pal_prompt = get_pal_prompt(data)
start_time = time.time()
completions = []
while True:
try:
pal_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=500,
stop='\n\n\n',
prompt=pal_prompt,
temperature=pal_temperature,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
pal_solution = None
if pal_solution is not None:
completions.extend([choice['text']
for choice in pal_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_select(data: dict, key: str, cot_solution: list, pal_solution: list):
'''
This function is used to query OpenAI for selection solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
cot_solution: a list of CoT solutions
pal_solution: a list of PAL solutions
Returns:
completions: a list of selection solutions
'''
selection_prompt = get_select_prompt(
data, cot_solution, pal_solution)
start_time = time.time()
completions = []
while True:
try:
selection_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=100,
stop='\n\n',
prompt=selection_prompt,
temperature=0.,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
selection_solution = None
if selection_solution is not None:
completions.extend([choice['text']
for choice in selection_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_math(data: dict, key: str, cot_temperature: float, pal_temperature: float, sc_num: int):
'''
This function is used to query OpenAI for answers in arithmetic tasks. It contains three steps:
1. Query CoT for solutions
2. Query PAL for solutions
3. Query model selection answers
Note that we only query selection answers when CoT and PAL answers are different. Otherwise, we directly use CoT or PAL answers.
We also use majority voting to select the final answer if we have multiple self-consistency samples.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT. 0 for greedy decoding. We set it to 0.5 for self-consistency samples.
pal_temperature: the temperature used in PAL. 0 for greedy decoding. We set it to 0.8 for self-consistency samples.
sc_num: the number of self-consistency samples
Returns:
to_dump_data: a dict containing the question, answer, the final answer and other information
'''
cot_answers = []
pal_answers = []
cot_solutions = []
pal_solutions = []
selection_solutions = []
final_answers = []
for i in range(sc_num):
cot_ans = None
pal_ans = None
selection_ans = None
final_ans = None
cot_solution = query_cot(
data, key, cot_temperature)
if cot_solution is None:
print('Time out')
return None
else:
cot_ans = extract_num_codex(cot_solution[0])
cot_answers.append(cot_ans)
cot_solutions.append(cot_solution[0])
pal_solution = query_pal(
data, key, pal_temperature)
if pal_solution is None:
print('Time out')
return None
else:
pal_ans = safe_execute_codex(pal_solution[0])
pal_answers.append(pal_ans)
pal_solutions.append(pal_solution[0])
if cot_ans is not None and pal_ans is not None:
# ==== Only select when CoT and PAL are different ====
if abs(cot_ans - pal_ans) > 1e-3:
selection_ans = query_select(
data, key, cot_solution=cot_solution, pal_solution=pal_solution)
if selection_ans is None:
print('Time out')
return None
else:
selection_choice = extract_choice_codex(selection_ans[0])
selection_solutions.append(selection_ans[0])
if selection_choice == '(A)':
final_ans = pal_ans
elif selection_choice == '(B)':
final_ans = cot_ans
else:
final_ans = cot_ans
elif cot_ans is not None and pal_ans is None:
final_ans = cot_ans
elif cot_ans is None and pal_ans is not None:
final_ans = pal_ans
else:
final_ans = None
final_answers.append(final_ans)
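    # Majority vote over the self-consistency samples determines the final answer.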
count = Counter(final_answers)
majority_ans = count.most_common(1)[0][0]
# === dump data ===
to_dump_data = OrderedDict(
{'index': data['index'], 'question': data['question'], 'answer': data['answer'],
'majority_ans': majority_ans, 'final_answers': final_answers,
'cot_executed': cot_answers, 'pal_executed': pal_answers,
'cot_generated': cot_solutions, 'pal_generated': pal_solutions, 'choice_solution': selection_solutions}
)
return to_dump_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=-1)
parser.add_argument('--dataset', type=str, choices=[
'gsm8k', 'svamp', 'asdiv', 'singleeq', 'singleop',
'singleaddsub', 'multiarith'], default='gsm8k')
parser.add_argument('--cot_temperature', type=float, default=0.)
parser.add_argument('--pal_temperature', type=float, default=0.)
parser.add_argument('--sc_num', type=int, default=1,
help='Self-consistency samples. 1 indicates greedy decoding')
parser.add_argument('--output_dir', type=str, default='../output/')
parser.add_argument(
'--key', type=str, default='sk-', required=True)
args = parser.parse_args()
start_index = args.start
end_index = args.end
dataset_name = args.dataset
cot_temperature = args.cot_temperature
pal_temperature = args.pal_temperature
sc_num = args.sc_num
output_dir = args.output_dir
key = args.key
start_time_0 = time.time()
print('Current time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
dt_string = datetime.now().strftime("%m_%d_%H_%M")
if dataset_name == 'gsm8k':
dataset = jsonlines_load('../dataset/gsm8K_test.jsonl')
elif dataset_name == 'svamp':
dataset = jsonlines_load('../dataset/svamp.jsonl')
elif dataset_name == 'asdiv':
dataset = jsonlines_load('../dataset/asdiv.jsonl')
elif dataset_name == 'singleeq':
dataset = jsonlines_load('../dataset/single_eq.jsonl')
elif dataset_name == 'singleop':
dataset = jsonlines_load('../dataset/single_op.jsonl')
elif dataset_name == 'singleaddsub':
dataset = jsonlines_load('../dataset/single_addsub.jsonl')
elif dataset_name == 'multiarith':
dataset = jsonlines_load('../dataset/multiarith.jsonl')
# === slice data based on start and end ===
total_num = len(dataset)
print('total data: ', total_num)
if end_index == -1:
end_index = total_num
if end_index > total_num:
end_index = total_num
tasks = dataset[start_index:end_index]
task_num = len(tasks)
print('Current total tasks: ', task_num)
unfinished_tasks = []
output_path = os.path.join(output_dir, 'codex/')
if not os.path.exists(output_path):
os.makedirs(output_path)
save_path = os.path.join(output_path,
f'{dataset_name}_sc{sc_num}_s{start_index}_e{end_index}_{dt_string}.jsonl')
# === dump data ===
progress_bar = tqdm(range(task_num))
for i in range(task_num):
task = tasks[i]
wait_time = min(sc_num * 100, 360)
start_time = time.time()
while True:
try:
ans = query_math(
task, key=key, cot_temperature=cot_temperature,
pal_temperature=pal_temperature, sc_num=sc_num)
except Exception as e:
print(e)
ans = None
if ans is not None:
with open(save_path, "a+") as fout:
fout.write(json.dumps(ans)+'\n')
progress_bar.update(1)
break
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > wait_time:
print('Time out')
print('Current Task: ', i)
unfinished_tasks.append(task)
break
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
end_time_0 = time.time()
print('Finish at time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
print(f'Time used: {end_time_0 - start_time_0} seconds')
if len(unfinished_tasks) > 0:
print('Unfinished tasks: ')
for task in unfinished_tasks:
print(task)
print('Done')
| [
"question",
"PLACEHOLDERQ: PLACEHOLDER\n\n# solution in Python:\n",
"PLACEHOLDERQ: PLACEHOLDER\n\n"
] |
2024-01-10 | XuZhao0/Model-Selection-Reasoning | src~selection_math.py | import json
import os
import openai
import random
import time
from datetime import datetime
import argparse
from tqdm import tqdm
from typing import Union
from prompts import math_prompt
from collections import OrderedDict, Counter
from tool import *
def get_user_assistant_messages(system_message: str, user_message: str, assistant_message: str):
'''
This function is used to convert the prompt into the message format used by OpenAI Chat API.
'''
messages = []
messages.append({"role": "system", "content": system_message})
split_user_messages = user_message.split('\n\n\n\n')
split_assistant_messages = assistant_message.split('\n\n\n\n')
for i in range(len(split_user_messages)):
question = split_user_messages[i]
answer = split_assistant_messages[i]
messages += [
{"role": "user", "content": f"{question}"},
{"role": "assistant", "content": f"{answer}"},
]
return messages
def get_cot_prompt(data: dict, backbone: str):
'''
This function is used to generate the CoT prompt.
'''
if backbone == 'gpt4':
system_message = math_prompt.GPT4_COT_SYSTEM
user_message = math_prompt.GPT4_COT_USER
assistant_message = math_prompt.GPT4_COT_ASSISTANT
elif backbone == 'chatgpt':
system_message = math_prompt.TURBO_COT_SYSTEM
user_message = math_prompt.TURBO_COT_USER
assistant_message = math_prompt.TURBO_COT_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
question_message = data['question']
messages += [{"role": "user", "content": f"Question: {question_message}"}]
return messages
def get_pal_prompt(data: dict, backbone: str):
'''
This function is used to generate the PAL prompt.
'''
if backbone == 'gpt4':
system_message = math_prompt.GPT4_PAL_SYSTEM
user_message = math_prompt.GPT4_PAL_USER
assistant_message = math_prompt.GPT4_PAL_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
question_message = data['question']
messages += [{"role": "user",
"content": f"Question: {question_message}\n\n# solution in Python"}]
elif backbone == 'chatgpt':
system_message = math_prompt.TURBO_PAL_SYSTEM
user_message = math_prompt.TURBO_PAL_USER
assistant_message = math_prompt.TURBO_PAL_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
question_message = data['question']
messages += [{"role": "user",
"content": f"Answer the following question in Python: {question_message}"}]
return messages
def get_select_prompt(data: dict, cot_solution: list, pal_solution: list, backbone: str):
'''
This function is used to generate the selection prompt.
'''
if backbone == 'gpt4':
system_message = math_prompt.GPT4_SELECT_SYSTEM
user_message = math_prompt.GPT4_SELECT_USER
assistant_message = math_prompt.GPT4_SELECT_ASSISTANT
elif backbone == 'chatgpt':
system_message = math_prompt.TURBO_SELECT_SYSTEM
user_message = math_prompt.TURBO_SELECT_USER
assistant_message = math_prompt.TURBO_SELECT_ASSISTANT
messages = get_user_assistant_messages(
system_message, user_message, assistant_message)
try:
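        # Remove the docstring block (text between the first pair of triple quotes) from
        # the PAL program before showing it to the selection model; fall back to the raw
        # program if the split fails.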
pal_generated_list = pal_solution[0].split('"""')
pal_generated = pal_generated_list[0].strip(
) + pal_generated_list[2]
except Exception as e:
pal_generated = pal_solution[0]
if cot_solution[0].startswith('Answer:'):
cot_generated = cot_solution[0]
else:
cot_generated = 'Answer:\n' + cot_solution[0]
user_message = f'''Math problem: {data['question'].strip()}
(A)
{cot_generated.strip()}
(B)
{pal_generated.strip()}
Which of the above two choices can correctly answer the math problem?'''
messages += [{"role": "user", "content": user_message}]
return messages
def query_cot(data: dict, key: str, cot_temperature: float, backbone: str):
'''
This function is used to query OpenAI for CoT solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the CoT solution
'''
query_message = get_cot_prompt(data, backbone=backbone)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
try:
cot_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=500,
stop='\n\n\n',
messages=query_message,
temperature=cot_temperature,
top_p=1.0,
n=1)
except Exception as e:
cot_solution = None
if cot_solution is not None:
completions.extend([choice['message']['content']
for choice in cot_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_pal(data: dict, key: str, pal_temperature: float, backbone: str):
'''
This function is used to query OpenAI for PAL solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
pal_temperature: the temperature used in PAL
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the PAL solution
'''
query_message = get_pal_prompt(data, backbone=backbone)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
try:
pal_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=500,
stop='\n\n\n',
messages=query_message,
temperature=pal_temperature,
top_p=1.0,
n=1)
except Exception as e:
pal_solution = None
if pal_solution is not None:
completions.extend([choice['message']['content']
for choice in pal_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_selection(data: dict, key: str, cot_solution: list, pal_solution: list, backbone: str):
'''
This function is used to query OpenAI for selection solutions.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_solution: a list containing the CoT solution
pal_solution: a list containing the PAL solution
backbone: ChatGPT or GPT-4
Returns:
completions: a list containing the selection solution
'''
selection_message = get_select_prompt(
data, cot_solution, pal_solution, backbone=backbone)
if backbone == 'gpt4':
model_name = 'gpt-4'
elif backbone == 'chatgpt':
model_name = 'gpt-3.5-turbo'
start_time = time.time()
completions = []
while True:
try:
selection_solution = openai.ChatCompletion.create(
api_key=key,
model=model_name,
max_tokens=200,
stop='\n\n',
messages=selection_message,
temperature=0.,
top_p=1.0,
n=1)
except Exception as e:
selection_solution = None
if selection_solution is not None:
completions.extend([choice['message']['content']
for choice in selection_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_math(data: dict, key: str, cot_temperature: float, pal_temperature: float, sc_num: int, backbone: str):
'''
This function is used to query OpenAI for answers in arithmetic tasks. It contains three steps:
1. Query CoT for solutions
2. Query PAL for solutions
3. Query model selection answers
Note that we only query selection answers when CoT and PAL answers are different. Otherwise, we directly use CoT or PAL answers.
We also use majority voting to select the final answer if we have multiple self-consistency samples.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT. 0 for greedy decoding. We set it to 0.5 for self-consistency samples.
pal_temperature: the temperature used in PAL. 0 for greedy decoding. We set it to 0.8 for self-consistency samples.
sc_num: the number of self-consistency samples
backbone: ChatGPT or GPT-4
Returns:
to_dump_data: a dict containing the question, answer, the final answer and other information
'''
cot_answers = []
pal_answers = []
cot_solutions = []
pal_solutions = []
selection_solutions = []
final_answers = []
for i in range(sc_num):
cot_ans = None
pal_ans = None
selection_ans = None
final_ans = None
cot_solution = query_cot(
data, key, cot_temperature, backbone=backbone)
if cot_solution is None:
print('Time out')
return None
else:
cot_ans = extract_num_turbo(cot_solution[0])
cot_answers.append(cot_ans)
cot_solutions.append(cot_solution[0])
pal_solution = query_pal(
data, key, pal_temperature, backbone=backbone)
if pal_solution is None:
print('Time out')
return None
else:
pal_ans = safe_execute_turbo(pal_solution[0])
pal_answers.append(pal_ans)
pal_solutions.append(pal_solution[0])
if cot_ans is not None and pal_ans is not None:
# ==== Only select when CoT and PAL are different ====
if abs(cot_ans - pal_ans) > 1e-3:
selection_ans = query_selection(
data, key, cot_solution=cot_solution, pal_solution=pal_solution, backbone=backbone)
if selection_ans is None:
print('Time out')
return None
else:
selection_choice = extract_choice_turbo(selection_ans[0])
selection_solutions.append(selection_ans[0])
if selection_choice == '(A)':
final_ans = cot_ans
elif selection_choice == '(B)':
final_ans = pal_ans
else:
final_ans = cot_ans
elif cot_ans is not None and pal_ans is None:
final_ans = cot_ans
elif cot_ans is None and pal_ans is not None:
final_ans = pal_ans
else:
final_ans = None
final_answers.append(final_ans)
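    # Majority vote over the self-consistency samples determines the final answer.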
count = Counter(final_answers)
majority_ans = count.most_common(1)[0][0]
# === dump data ===
to_dump_data = OrderedDict(
{'index': data['index'], 'question': data['question'], 'answer': data['answer'],
'majority_ans': majority_ans, 'final_answers': final_answers,
'cot_executed': cot_answers, 'pal_executed': pal_answers,
'cot_generated': cot_solutions, 'pal_generated': pal_solutions, 'choice_solution': selection_solutions}
)
return to_dump_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=-1)
parser.add_argument('--dataset', type=str, choices=[
'gsm8k', 'svamp', 'asdiv', 'singleeq', 'singleop',
'singleaddsub', 'multiarith'], default='gsm8k')
parser.add_argument('--backbone', type=str,
choices=['chatgpt', 'gpt4'], default='gpt4')
parser.add_argument('--cot_temperature', type=float, default=0.)
parser.add_argument('--pal_temperature', type=float, default=0.)
parser.add_argument('--sc_num', type=int, default=1,
help='Self-consistency samples. 1 indicates greedy decoding')
parser.add_argument('--output_dir', type=str, default='../output/')
parser.add_argument(
'--key', type=str, default='sk-', required=True)
args = parser.parse_args()
start_index = args.start
end_index = args.end
dataset_name = args.dataset
cot_temperature = args.cot_temperature
pal_temperature = args.pal_temperature
backbone = args.backbone
sc_num = args.sc_num
output_dir = args.output_dir
key = args.key
start_time_0 = time.time()
print('Current time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
dt_string = datetime.now().strftime("%m_%d_%H_%M")
if dataset_name == 'gsm8k':
dataset = jsonlines_load('../dataset/gsm8K_test.jsonl')
elif dataset_name == 'svamp':
dataset = jsonlines_load('../dataset/svamp.jsonl')
elif dataset_name == 'asdiv':
dataset = jsonlines_load('../dataset/asdiv.jsonl')
elif dataset_name == 'singleeq':
dataset = jsonlines_load('../dataset/single_eq.jsonl')
elif dataset_name == 'singleop':
dataset = jsonlines_load('../dataset/single_op.jsonl')
elif dataset_name == 'singleaddsub':
dataset = jsonlines_load('../dataset/single_addsub.jsonl')
elif dataset_name == 'multiarith':
dataset = jsonlines_load('../dataset/multiarith.jsonl')
# === slice data based on start and end ===
total_num = len(dataset)
print('total data: ', total_num)
if end_index == -1:
end_index = total_num
if end_index > total_num:
end_index = total_num
tasks = dataset[start_index:end_index]
task_num = len(tasks)
print('Current total tasks: ', task_num)
unfinished_tasks = []
output_path = os.path.join(output_dir, f'{backbone}/')
if not os.path.exists(output_path):
os.makedirs(output_path)
save_path = os.path.join(output_path,
f'{dataset_name}_sc{sc_num}_s{start_index}_e{end_index}_{dt_string}.jsonl')
# === run experiments ===
progress_bar = tqdm(range(task_num))
for i in range(task_num):
task = tasks[i]
wait_time = min(sc_num * 100, 360)
start_time = time.time()
while True:
try:
ans = query_math(
task, key=key, cot_temperature=cot_temperature,
pal_temperature=pal_temperature, sc_num=sc_num, backbone=backbone)
except Exception as e:
print(e)
ans = None
if ans is not None:
with open(save_path, "a+") as fout:
fout.write(json.dumps(ans)+'\n')
progress_bar.update(1)
break
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > wait_time:
print('Time out')
print('Current Task: ', i)
unfinished_tasks.append(task)
break
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
end_time_0 = time.time()
print('Finish at time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
print(f'Time used: {end_time_0 - start_time_0} seconds')
if len(unfinished_tasks) > 0:
print('Unfinished tasks: ')
for task in unfinished_tasks:
print(task)
print('Done')
| [
"PLACEHOLDER",
"Question: PLACEHOLDER\n\n# solution in Python",
"Answer the following question in Python: PLACEHOLDER",
"Question: PLACEHOLDER"
] |
2024-01-10 | XuZhao0/Model-Selection-Reasoning | src~selection_date_codex.py | import json
import os
import openai
import random
import time
from datetime import datetime
import argparse
import re
from tqdm import tqdm
from typing import Union
from prompts import date_prompt
from collections import OrderedDict, Counter
from tool import *
def get_cot_prompt(data: dict):
'''
This function is used to generate the CoT prompt for date understanding problem
'''
cot_prompt = date_prompt.CODEX_COT_PROMPT
inference_prompt = cot_prompt + f'''Q: {data['question'].strip()}
'''
return inference_prompt
def get_pal_prompt(data: dict):
'''
This function is used to generate the PAL prompt for date understanding problem
'''
pal_prompt = date_prompt.CODEX_PAL_PROMPT
inference_prompt = pal_prompt + f'''Q: {data['question'].strip()}
# solution in Python:
'''
return inference_prompt
def get_select_prompt(data: dict, cot_solution: list, pal_solution: list):
'''
This function is used to generate the selection prompt for date understanding problem
'''
selection_prompt = date_prompt.CODEX_SELECT_PROMPT
try:
pal_generated_list = pal_solution[0].split('"""')
pal_generated = pal_generated_list[0].strip(
) + pal_generated_list[2]
except Exception as e:
pal_generated = pal_solution[0]
cot_generated = cot_solution[0]
inference_prompt = selection_prompt + f'''Date Understanding Problem: {data['question'].strip()}
Question: Which of the following two choices can correctly answer the math problem?
(A)
{pal_generated.strip()}
(B)
{cot_generated.strip()}
Answer:'''
return inference_prompt
def query_cot(data: dict, key: str, cot_temperature: float):
'''
This function is used to query OpenAI for CoT solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
cot_temperature: temperature used for CoT
Returns:
completions: a list of CoT solutions
'''
cot_prompt = get_cot_prompt(data)
start_time = time.time()
completions = []
while True:
try:
cot_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=500,
stop='\n\n\n',
prompt=cot_prompt,
temperature=cot_temperature,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
cot_solution = None
if cot_solution is not None:
completions.extend([choice['text']
for choice in cot_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_pal(data: dict, key: str, pal_temperature: float):
'''
This function is used to query OpenAI for PAL solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
pal_temperature: temperature used for PAL
Returns:
completions: a list of PAL solutions
'''
pal_prompt = get_pal_prompt(data)
start_time = time.time()
completions = []
while True:
try:
pal_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=500,
stop='\n\n\n',
prompt=pal_prompt,
temperature=pal_temperature,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
pal_solution = None
if pal_solution is not None:
completions.extend([choice['text']
for choice in pal_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_select(data: dict, key: str, cot_solution: list, pal_solution: list):
'''
This function is used to query OpenAI for selection solutions.
Args:
data: a dict containing the question and answer
key: OpenAI API key
cot_solution: a list of CoT solutions
pal_solution: a list of PAL solutions
Returns:
completions: a list of selection solutions
'''
selection_prompt = get_select_prompt(
data, cot_solution, pal_solution)
start_time = time.time()
completions = []
while True:
try:
selection_solution = openai.Completion.create(
api_key=key,
model='code-davinci-002',
max_tokens=100,
stop='\n\n',
prompt=selection_prompt,
temperature=0.,
top_p=1.0,
n=1,
best_of=1)
except Exception as e:
selection_solution = None
if selection_solution is not None:
completions.extend([choice['text']
for choice in selection_solution['choices']])
completions = completions[:1]
return completions
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 60:
return None
def query_date(data: dict, key: str, cot_temperature: float, pal_temperature: float):
'''
This function is used to query OpenAI for answers in date understanding tasks. It contains three steps:
1. Query CoT for solutions
2. Query PAL for solutions
3. Query model selection answers
Note that we only query selection answers when CoT and PAL answers are different. Otherwise, we directly use CoT or PAL answers.
Args:
data: a dict containing the question and answer
key: the OpenAI API key
cot_temperature: the temperature used in CoT. 0 for greedy decoding.
pal_temperature: the temperature used in PAL. 0 for greedy decoding.
Returns:
to_dump_data: a dict containing the question, answer, the final answer and other information
'''
cot_ans = None
pal_ans = None
selection_ans = None
final_ans = None
cot_solution = query_cot(
data, key, cot_temperature)
if cot_solution is None:
print('Time out')
return None
else:
cot_ans = extract_date_cot(cot_solution[0])
pal_solution = query_pal(
data, key, pal_temperature)
if pal_solution is None:
print('Time out')
return None
else:
pal_ans = execute_date_pal(pal_solution[0])
if cot_ans is not None and pal_ans is not None:
# ==== Only select when CoT and PAL are different ====
if cot_ans != pal_ans:
selection_ans = query_select(
data, key, cot_solution=cot_solution, pal_solution=pal_solution)
if selection_ans is None:
print('Time out')
return None
else:
selection_choice = extract_choice_codex(selection_ans[0])
if selection_choice == '(A)':
final_ans = pal_ans
elif selection_choice == '(B)':
final_ans = cot_ans
else:
final_ans = cot_ans
elif cot_ans is not None and pal_ans is None:
final_ans = cot_ans
elif cot_ans is None and pal_ans is not None:
final_ans = pal_ans
else:
final_ans = None
# === dump data ===
to_dump_data = OrderedDict(
{'index': data['index'], 'question': data['question'], 'answer': data['answer'],
'final_ans': final_ans, 'cot_executed': cot_ans, 'pal_executed': pal_ans,
'cot_generated': cot_solution, 'pal_generated': pal_solution, 'choice_solution': selection_ans}
)
return to_dump_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=-1)
parser.add_argument('--cot_temperature', type=float, default=0.)
parser.add_argument('--pal_temperature', type=float, default=0.)
parser.add_argument('--output_dir', type=str, default='../output/')
parser.add_argument(
'--key', type=str, default='sk-', required=True)
args = parser.parse_args()
start_index = args.start
end_index = args.end
cot_temperature = args.cot_temperature
pal_temperature = args.pal_temperature
output_dir = args.output_dir
key = args.key
start_time_0 = time.time()
print('Current time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
dt_string = datetime.now().strftime("%m_%d_%H_%M")
dataset = jsonlines_load('../dataset/date_understanding.jsonl')
# === slice data based on start and end ===
total_num = len(dataset)
print('total data: ', total_num)
if end_index == -1:
end_index = total_num
if end_index > total_num:
end_index = total_num
tasks = dataset[start_index:end_index]
task_num = len(tasks)
print('Current total tasks: ', task_num)
unfinished_tasks = []
output_path = os.path.join(output_dir, 'codex/')
if not os.path.exists(output_path):
os.makedirs(output_path)
save_path = os.path.join(output_path,
f'date_s{start_index}_e{end_index}_{dt_string}.jsonl')
# === dump data ===
progress_bar = tqdm(range(task_num))
for i in range(task_num):
task = tasks[i]
start_time = time.time()
while True:
try:
ans = query_date(
task, key=key, cot_temperature=cot_temperature,
pal_temperature=pal_temperature)
except Exception as e:
print(e)
ans = None
if ans is not None:
with open(save_path, "a+") as fout:
fout.write(json.dumps(ans)+'\n')
progress_bar.update(1)
break
else:
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
if time.time() - start_time > 120:
print('Time out')
print('Current Task: ', i)
unfinished_tasks.append(task)
break
sleep_time = random.uniform(3, 5)
time.sleep(sleep_time)
end_time_0 = time.time()
print('Finish at time: ', time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime()))
print(f'Time used: {end_time_0 - start_time_0} seconds')
if len(unfinished_tasks) > 0:
print('Unfinished tasks: ')
for task in unfinished_tasks:
print(task)
print('Done')
| [
"question",
"PLACEHOLDERQ: PLACEHOLDER\n\n# solution in Python:\n",
"PLACEHOLDERQ: PLACEHOLDER\n\n"
] |
2024-01-10 | saiivarma/AlumAI | database.py | import os
import pinecone
from langchain.document_loaders import UnstructuredPDFLoader, OnlinePDFLoader
from LLM import get_embedding
def init(API_key, env):
"""
Initializing pinecone
"""
pinecone.init(api_key = API_key, environment = env)
active_indexes = pinecone.list_indexes()
if active_indexes == []:
pinecone.create_index("alumai", dimension=1536, metric = 'cosine' )
return pinecone.list_indexes()
def data_loader(file):
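    """
    Load a single PDF file into LangChain Document objects.
    """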
return UnstructuredPDFLoader(file).load()
def load_data(data_path):
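    """
    Load every PDF in a directory, keyed by file name.
    """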
data = {}
files = os.listdir(data_path)
for i in files:
data[i] = data_loader("{}/{}".format(data_path,i))
return data
def create_vectors(data, category):
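    """
    Build (id, embedding, metadata) tuples ready to upsert into Pinecone.
    """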
embeddings = []
for i in data.keys():
embeddings.append((i,get_embedding(data[i][0].page_content),{'source':data[i][0].metadata['source'],'category':category}))
return embeddings
def insert(index_name, embeddings):
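    """
    Upsert the embedding tuples into the given Pinecone index.
    """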
index = pinecone.Index(index_name)
status = index.upsert(embeddings)
return status
def similarity_search(index_name, query, category):
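    """
    Return the ids of the top-2 documents in the given category most similar to the query.
    """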
index = pinecone.Index(index_name)
results = index.query(
vector=get_embedding(query),
filter={
"category": {"$eq": category}
},
top_k=2,
include_metadata=True
)
ids = []
for i in results['matches']:
ids.append(i['id'])
return ids
def fetch_data(ids, data_path):
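    """
    Reload the source PDFs that correspond to the matched ids.
    """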
data = []
for i in ids:
data.append(data_loader("{}/{}".format(data_path,i))[0])
return data
| [] |
2024-01-10 | YichenGong/Agent47Agent | pommerman~cli~train_with_tensorforce.py | """Train an agent with TensorForce.
Call this with a config, a game, and a list of agents, one of which should be a
tensorforce agent. The script will start separate threads to operate the agents
and then report back the result.
An example with all three simple agents running ffa:
python train_with_tensorforce.py \
--agents=tensorforce::ppo,test::agents.SimpleAgent,test::agents.SimpleAgent,test::agents.SimpleAgent \
--config=PommeFFA-v0
"""
import atexit
import functools
import os
import argparse
import docker
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
import gym
from .. import helpers, make
from ..agents import TensorForceAgent
client = docker.from_env()
def clean_up_agents(agents):
"""Stops all agents"""
return [agent.shutdown() for agent in agents]
class WrappedEnv(OpenAIGym):
def __init__(self, gym, visualize=False):
self.gym = gym
self.visualize = visualize
def execute(self, actions):
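        # Render if requested, collect observations for all agents, let the non-training
        # agents act, insert the training agent's action at its index, then step the
        # environment and return the featurized state and reward for the training agent.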
if self.visualize:
self.gym.render()
obs = self.gym.get_observations()
all_actions = self.gym.act(obs)
all_actions.insert(self.gym.training_agent, actions)
state, reward, terminal, _ = self.gym.step(all_actions)
agent_state = self.gym.featurize(state[self.gym.training_agent])
agent_reward = reward[self.gym.training_agent]
return agent_state, terminal, agent_reward
def reset(self):
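        # NOTE: obs[3] assumes the training agent occupies slot 3 of the observation list.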
obs = self.gym.reset()
agent_obs = self.gym.featurize(obs[3])
return agent_obs
def main():
parser = argparse.ArgumentParser(description="Playground Flags.")
parser.add_argument("--game",
default="pommerman",
help="Game to choose.")
parser.add_argument("--config",
default="PommeFFA-v0",
help="Configuration to execute. See env_ids in "
"configs.py for options.")
parser.add_argument("--agents",
default="tensorforce::ppo,test::agents.SimpleAgent,"
"test::agents.SimpleAgent,test::agents.SimpleAgent",
help="Comma delineated list of agent types and docker "
"locations to run the agents.")
parser.add_argument("--agent_env_vars",
help="Comma delineated list of agent environment vars "
"to pass to Docker. This is only for the Docker Agent."
" An example is '0:foo=bar:baz=lar,3:foo=lam', which "
"would send two arguments to Docker Agent 0 and one to"
" Docker Agent 3.",
default="")
parser.add_argument("--record_pngs_dir",
default=None,
help="Directory to record the PNGs of the game. "
"Doesn't record if None.")
parser.add_argument("--record_json_dir",
default=None,
help="Directory to record the JSON representations of "
"the game. Doesn't record if None.")
parser.add_argument("--render",
default=False,
action='store_true',
help="Whether to render or not. Defaults to False.")
parser.add_argument("--game_state_file",
default=None,
help="File from which to load game state. Defaults to "
"None.")
args = parser.parse_args()
config = args.config
record_pngs_dir = args.record_pngs_dir
record_json_dir = args.record_json_dir
agent_env_vars = args.agent_env_vars
game_state_file = args.game_state_file
# TODO: After https://github.com/MultiAgentLearning/playground/pull/40
# this is still missing the docker_env_dict parsing for the agents.
agents = [
helpers.make_agent_from_string(agent_string, agent_id+1000)
for agent_id, agent_string in enumerate(args.agents.split(","))
]
env = make(config, agents, game_state_file)
training_agent = None
for agent in agents:
if type(agent) == TensorForceAgent:
training_agent = agent
env.set_training_agent(agent.agent_id)
break
if args.record_pngs_dir:
assert not os.path.isdir(args.record_pngs_dir)
os.makedirs(args.record_pngs_dir)
if args.record_json_dir:
assert not os.path.isdir(args.record_json_dir)
os.makedirs(args.record_json_dir)
# Create a Proximal Policy Optimization agent
agent = training_agent.initialize(env)
atexit.register(functools.partial(clean_up_agents, agents))
wrapped_env = WrappedEnv(env, visualize=args.render)
runner = Runner(agent=agent, environment=wrapped_env)
runner.run(episodes=10, max_episode_timesteps=2000)
print("Stats: ", runner.episode_rewards, runner.episode_timesteps,
runner.episode_times)
try:
runner.close()
except AttributeError as e:
pass
if __name__ == "__main__":
main()
| [] |
2024-01-10 | sshleifer/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
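        # Download and extract the corpus tarball, decompress each subset .xz archive in
        # parallel, then gather every contained .txt file for the single train split.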
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | Slickytail/Group-Therapy | judge_human.py | import openai
import json
from itertools import chain
from rich import print
import argparse
from therapy.agent import from_file as agents_from_file
from chat import chat_step
def read_loop(conversation, speakers, judge):
"""
Reads a conversation and has the judge pretend to pick between the real response and the generated responses
"""
for (i, message) in enumerate(conversation):
# nothing to do for user messages
if message["role"] == "user":
print(f"[green] Thinker [/green][grey85]:[/grey85] {message['content']}")
else:
# get all the previous messages
messages_up_to_now = conversation[:i]
suggestions = [message["content"]]
# Speaker Pass: get batches of messages from the speakers
# and flatten the list of lists into a single list
suggestions += list(chain(*(chat_step(messages_up_to_now, speaker, n=1) for speaker in speakers)))
# pretty-print the options
for (i, suggestion) in enumerate(suggestions):
name = "Human" if i == 0 else f"{speakers[i-1].name}GPT"
print(f"[blue] {name} ({i}) [/blue][grey85]:[/grey85] {suggestion}")
# Judge Pass: pick between a batch of messages
# judge read the entire history, plus a system message containing the proposed messages
judge_history = messages_up_to_now + [{"role": "system",
"content": "\n".join(f"(Helper {i}) \n{s}" for i, s in enumerate(suggestions))
}]
judgement = chat_step(judge_history, judge)[0]
print(f"[red] Judge : {judgement} \n[/red]")
if __name__ == "__main__":
# parse command-line arguments
parser = argparse.ArgumentParser(description='Judge a human conversation.')
parser.add_argument('file', metavar='file', type=str,
help='Path to the file containing the conversation')
args = parser.parse_args()
fname = args.file
# read the file
with open(fname) as f:
session = json.load(f).get("messages", [])
# sort the session by timestamp
session.sort(key = lambda m: m["time"])
# the human session may have multiple messages in a row from each side.
# we want to group them together into a single message
messages = []
for msg in session:
role = "user" if msg["user"] == "thinker" else "assistant"
if len(messages) == 0 or messages[-1]["role"] != role:
messages.append({"role": role, "content": msg["text"]})
else:
messages[-1]["content"] += "\n\n" + msg["text"]
# read the API key from the file
with open("config.json") as f:
config = json.load(f)
openai.api_key = config["api_key"]
# some openai keys are organization specific
# check if the config contains the organization key
if "organization" in config:
openai.organization = config["organization"]
# other misc config
MAX_TOKENS = config.get("max_tokens", 1024)
# read the agent config
agents = agents_from_file("agents.json")
judge = agents.get(name = "judge-analytic")
speakers = agents.get_all(behavior = "speaker")
read_loop(messages, speakers, judge)
| [
"(Helper 0) \nPLACEHOLDER"
] |
2024-01-10 | Bestest-Study-Group/sentimentIndex | worker~sentiment_analyser~newssenti.py | import http.client
import json
import time
from math import floor
import cohere
from cohere.classify import Example
from dotenv import load_dotenv
import os
from datetime import datetime
colin = [
Example("MSCI Inc. stock rises Wednesday, still underperforms market", "negative"),
Example("DraftKings Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Willis Towers Watson PLC stock falls Tuesday, still outperforms market", "positive"),
Example("ONEOK Inc. stock rises Tuesday, outperforms market", "positive"),
Example("Marathon Oil Corp. stock falls Tuesday, still outperforms market", "positive"),
Example("Intuitive Surgical Inc. stock falls Tuesday, underperforms market", "positive"),
Example("Kohl's Corp. stock falls Monday, underperforms market", "negative"),
Example("Intuit Inc. stock rises Monday, still underperforms market", "negative"),
Example("Dow Inc. stock falls Monday, underperforms market", "negative"),
Example("Walgreens Boots Alliance Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Waste Management Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Teleflex Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Public Storage stock rises Thursday, still underperforms market", "negative"),
Example("Kohl's Corp. stock rises Thursday, outperforms market", "positive"),
Example("Johnson Controls International PLC stock rises Thursday, outperforms market", "positive"),
Example("Regency Centers Corp. stock rises Friday, outperforms market", "positive"),
Example("Snap-On Inc. stock rises Friday, still underperforms market", "negative"),
Example("Cooper Cos. stock rises Friday, still underperforms market", "negative"),
Example("Unum Group stock rises Wednesday, still underperforms market", "negative"),
Example("United Rentals Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Target Corp. stock outperforms market on strong trading day", "positive"),
Example("Snap Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Paramount Global Cl B stock outperforms market on strong trading day", "positive"),
Example("Live Nation Entertainment Inc. stock rises Wednesday, outperforms market", "positive"),
Example("International Flavors & Fragrances Inc. stock rises Wednesday, still underperforms market", "negative"),
]
load_dotenv()
COHERE = os.getenv('COHERE')
FREE_NEWS_API = os.getenv('FREE_NEWS_API')
co = cohere.Client(COHERE)
def load_articles(page):
conn = http.client.HTTPSConnection("free-news.p.rapidapi.com")
headers = {
'X-RapidAPI-Host': "free-news.p.rapidapi.com",
'X-RapidAPI-Key': FREE_NEWS_API
}
conn.request("GET", "/v1/search?q=stock%20market&lang=en&page={}".format(page), headers=headers)
res = conn.getresponse()
data = res.read()
articles = json.loads(data)["articles"]
return articles
def classify_articles(articles):
titles = []
dates = []
i = 0
for article in articles:
titles.append(article["title"])
date = datetime.strptime(article["published_date"], '%Y-%m-%d %H:%M:%S').timestamp()
print(article["published_date"])
print(date)
dates.append(int(date))
i = i + 1
if (i > 31):
break
classifications = co.classify(
model='medium',
taskDescription='Classify these as positive, negative',
outputIndicator='Classify this stock',
inputs=titles,
examples =colin
)
output = []
i = 0
for cl in classifications.classifications:
output.append({
'title': cl.input,
'sentiment': cl.prediction,
'date': dates[i],
'confidence': {
'positive': cl.confidence[0].confidence,
'negative': cl.confidence[1].confidence
}
})
i = i + 1
return output
# sum = 0.0
# print("[")
# for article in articles:
# if (article["summary"] == None):
# blob = TextBlob(article["title"])
# article["summary"] = "No Article"
# else:
# blob = TextBlob(article["summary"])
# senti = blob.sentiment.polarity
# sum += senti
# print("Example(\"" + article["title"] + "\", \"\"),")
# print("]")
# #print("OVERALL SENTIMENT: " + str(sum/len(articles)*5))
def run():
classifications = []
for i in range(1, 3):
articles = load_articles(i)
classifications.extend(classify_articles(articles))
time.sleep(5)
temp_data = {}
for out in classifications:
today = datetime.now()
temp = datetime.fromtimestamp(out['date'])
days = floor((today - temp).total_seconds() / (60*60*24))
if (days in temp_data):
if (out['sentiment'] == 'negative'):
temp_data[days].append(-1 * out['confidence']['negative'])
else:
temp_data[days].append(out['confidence']['positive'])
else:
if (out['sentiment'] == 'negative'):
temp_data[days] = [(-1 * out['confidence']['negative'])]
else:
temp_data[days] = [(out['confidence']['positive'])]
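    # final[i] holds the average sentiment of articles published (i+1) days ago, or None when no
    # article fell into that daily bucket (editor's note; layout inferred from the code below).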
final = [None]*30
    for td in temp_data:
        # skip out-of-range day buckets: td=0 (today) would otherwise wrap to final[-1]
        if td < 1 or td > 30:
            continue
        sum = 0
        for t in temp_data[td]:
            sum += t
        final[td-1] = sum / len(temp_data[td])
return final
# return classifications
run() | [] |
2024-01-10 | Bestest-Study-Group/sentimentIndex | worker~sentiment_analyser~redditsenti.py | from math import floor
import cohere
from cohere.classify import Example
from dotenv import load_dotenv
import os
import praw
from datetime import datetime
load_dotenv()
COHERE = os.getenv('COHERE')
REDDIT_CLIENT_ID = os.getenv('REDDIT_CLIENT_ID')
REDDIT_CLIENT_SECRET = os.getenv('REDDIT_CLIENT_SECRET')
co = cohere.Client(COHERE)
def get_reddit_posts():
reddit = praw.Reddit(
client_id=REDDIT_CLIENT_ID,
client_secret=REDDIT_CLIENT_SECRET,
user_agent='my user agent'
)
subreddit = reddit.subreddit('stocks')
# submissions = subreddit.search(stock, sort='relevance', time_filter='month')
submissions = subreddit.top(time_filter='week', limit=32)
return submissions
def classify_submissions(submissions):
fmt = '%Y-%m-%d'
titles = []
dates = []
i = 0
for submission in submissions:
titles.append(submission.title)
# dates.append(datetime.fromtimestamp(submission.created_utc).strftime(fmt))
dates.append(submission.created_utc)
i = i + 1
if (i > 31):
break
classifications = co.classify(
model='medium',
taskDescription='Classify these as positive, negative',
outputIndicator='Classify this stock',
inputs=titles,
examples=[
Example("More Room For Growth In Pioneer Energy Stock?", "positive"),
Example("After Dismal Performance Last Month, L'Oreal Stock Looks Set To Rebound", "positive"),
Example("The stock market is close to finding its bottom as corporate share buybacks surge to record highs, JPMorgan says", "positive"),
Example("How Do You Stay Confident in a Market Crash?", "negative"),
Example("Here's 1 of the Biggest Problems With Airbnb Stock", "negative"),
Example("GameStop Unveils Crypto and NFT Wallet, Shares up 3%", "positive"),
Example("Should You Buy Stocks With An Impending Bear Market And Possible Recession?", "negative"),
Example("Costco Q3 Earnings Preview: Don't Fall With It Any Longer (NASDAQ:COST)", "negative"),
Example("Bear Market Has Only Just Begun", "negative"),
Example("Photronics stock gains on guiding FQ3 above consensus after FQ2 beat (NASDAQ:PLAB)", "positive"),
Example("Texas Instruments Stock: Playing The Long Game; Buy Now (NASDAQ:TXN)", "positive"),
Example("U.S.-NEW YORK-STOCK MARKET-RISE", "positive"),
Example("Chart Check: Record high in sight! This stock from agrochemical space is a good buy on dips bet", "positive"),
Example("MSCI Inc. stock rises Wednesday, still underperforms market", "negative"),
Example("DraftKings Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Willis Towers Watson PLC stock falls Tuesday, still outperforms market", "positive"),
Example("ONEOK Inc. stock rises Tuesday, outperforms market", "positive"),
Example("Marathon Oil Corp. stock falls Tuesday, still outperforms market", "positive"),
Example("Intuitive Surgical Inc. stock falls Tuesday, underperforms market", "negative"),
Example("Kohl's Corp. stock falls Monday, underperforms market", "negative"),
Example("Intuit Inc. stock rises Monday, still underperforms market", "negative"),
Example("Dow Inc. stock falls Monday, underperforms market", "negative"),
Example("Walgreens Boots Alliance Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Waste Management Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Teleflex Inc. stock rises Thursday, still underperforms market", "negative"),
Example("Public Storage stock rises Thursday, still underperforms market", "negative"),
Example("Kohl's Corp. stock rises Thursday, outperforms market", "positive"),
Example("Johnson Controls International PLC stock rises Thursday, outperforms market", "positive"),
Example("Regency Centers Corp. stock rises Friday, outperforms market", "positive"),
Example("Snap-On Inc. stock rises Friday, still underperforms market", "negative"),
Example("Cooper Cos. stock rises Friday, still underperforms market", "negative"),
Example("Unum Group stock rises Wednesday, still underperforms market", "negative"),
Example("United Rentals Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Target Corp. stock outperforms market on strong trading day", "positive"),
Example("Snap Inc. stock rises Wednesday, outperforms market", "positive"),
Example("Paramount Global Cl B stock outperforms market on strong trading day", "positive"),
Example("Live Nation Entertainment Inc. stock rises Wednesday, outperforms market", "positive"),
Example("International Flavors & Fragrances Inc. stock rises Wednesday, still underperforms market", "negative"),
Example('The Nasdaq fell 2.5% today, while TSLA fell 8%', 'negative')
])
output = []
i = 0
for cl in classifications.classifications:
# print("TITLE: {}".format(cl.input))
# print("SENTIMENT: {}".format(cl.prediction))
# print("POSITIVE CONFIDENCE: {}".format(cl.confidence[0].confidence))
# print("NEGATIVE CONFIDENCE: {}".format(cl.confidence[1].confidence))
# print("DATE: {}".format(dates[i]))
output.append({
'title': cl.input,
'sentiment': cl.prediction,
'date': dates[i],
'confidence': {
'positive': cl.confidence[0].confidence,
'negative': cl.confidence[1].confidence
}
})
# temp[]
# output.append(IndividualClassification(cl.input, cl.prediction, dates[i], cl.confidence[0].confidence, cl.confidence[1].confidence))
i = i + 1
temp_data = {}
for out in output:
today = datetime.now()
temp = datetime.fromtimestamp(out['date'])
days = floor((today - temp).total_seconds() / (60*60*24))
if (days in temp_data):
if (out['sentiment'] == 'negative'):
temp_data[days].append(-1 * out['confidence']['negative'])
else:
temp_data[days].append(out['confidence']['positive'])
else:
if (out['sentiment'] == 'negative'):
temp_data[days] = [(-1 * out['confidence']['negative'])]
else:
temp_data[days] = [(out['confidence']['positive'])]
final = [None]*30
    for td in temp_data:
        # skip out-of-range day buckets: td=0 (today) would otherwise wrap to final[-1]
        if td < 1 or td > 30:
            continue
        sum = 0
        for t in temp_data[td]:
            sum += t
        final[td-1] = sum / len(temp_data[td])
return final
# return('The confidence levels of the labels are: {}'.format(
# classifications.classifications))
def run():
posts = get_reddit_posts()
classifications = classify_submissions(posts)
# for cl in classifications:
# print("{}: {}".format(cl.title, cl.sentiment))
return classifications | [] |
2024-01-10 | andybaumgar/nycmesh-calendar-bot | src~calendar_bot~event_extractor.py | import json
import os
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import Dict, TypedDict
import openai
import pytz
from dataclasses_json import dataclass_json
from dotenv import load_dotenv
from . import config
from .utils.format_event_description import add_description_disclaimer
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
@dataclass_json
@dataclass
class EventData:
date: datetime
title: str
is_event: bool
description: str
def unix_time_to_human_date(unix_time: str) -> str:
return datetime.fromtimestamp(int(float(unix_time))).strftime("%Y-%m-%d")
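# (editor's note) accepts a unix-timestamp string such as "1699999999.000" and formats it as YYYY-MM-DD in local time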
def get_calendar_prompt(date_ts: str, message: str) -> str:
human_date = unix_time_to_human_date(date_ts)
prompt = config.prompt.format(date=human_date, message=message)
return prompt
# Define a function to send a message to ChatGPT 3.5
def execute_prompt(prompt: str) -> str:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
return completion.choices[0].message
def get_event_data(date_ts: str, message: str) -> EventData:
if message == "":
return EventData(date=None, title=None, is_event=False, description="")
prompt = get_calendar_prompt(date_ts, message)
response = execute_prompt(prompt)
data = json.loads(response["content"])
if data["is_event"]:
# convert string to date and then back to UTC for later serialization
date_naive = datetime.strptime(data["date"], "%Y-%m-%dT%H:%M:%S")
date_local = pytz.timezone("US/Eastern").localize(date_naive)
date_utc = date_local.astimezone(pytz.utc)
data["date"] = date_utc
else:
data["date"] = None
event_data = EventData(**data, description=message)
return event_data
def get_event_data_summary(event_data: EventData, link: str) -> str:
local_date = event_data.date.astimezone(pytz.timezone("US/Eastern"))
human_readable_date = local_date.strftime("%A, %B %d, %Y %I:%M %p")
disclaimer = add_description_disclaimer(link)
summary = f"""*Title:* {event_data.title}\n*Date:* {human_readable_date} \n*Description:* {disclaimer}"""
return summary
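# Illustrative flow (editor's sketch, variable names hypothetical):
#     event_data = get_event_data(date_ts, message)
#     if event_data.is_event:
#         summary = get_event_data_summary(event_data, link)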
| [
"You are a helpful assistant."
] |
2024-01-10 | charles-c-chiang/Discord-Bot | example_bot.py | # This example requires the 'message_content' intent.
import discord
import json
import asyncio
import time
from datetime import datetime
from discord.ext import tasks, commands
from openai import AsyncAzureOpenAI
with open('auth.json') as f:
    auth = json.load(f)
token = auth['BOT_TOKEN']
OPENAI_API_KEY = auth['OPENAI_API_KEY']
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
bot = commands.Bot(command_prefix='/', intents=intents)
@bot.command()
async def test(ctx, arg):
await ctx.send(arg)
AIclient = AsyncAzureOpenAI(api_key = OPENAI_API_KEY,
api_version = "2023-07-01-preview",
azure_endpoint="https://charlietest.openai.azure.com/openai/deployments/charlieTestModel/chat/completions?api-version=2023-07-01-preview")
message_text = [{"role":"system","content":"You are StrangerBot, an introduction chatbot. Your job is to interact with two different strangers, and help them get to know each other better. You can do this by recommending them topics to talk about or asking them to share more about themselves. You should start each conversation by introducing yourself, then by asking both people their name and some basic facts about them. You should not have overly lengthy or detailed responses, as the focus of the conversation should be between the two strangers and you want to keep a friendly, casual tone. "}]
@client.event
async def on_ready():
print(f'We have logged in as {client.user}')
channel = client.get_channel(1169885828265279508)
await channel.send(f'Logged in as Bot! Time is {datetime.now()}')
last_message = 0
@client.event
async def on_message(message):
global message_text
global last_message
if message.author == client.user:
return
if message.content.startswith('$hello'):
await message.channel.send('Hello!')
return
# Dump conversation to file and quit
if message.content.startswith('$quit'):
await message.channel.send('Goodbye!')
with open("convo.json", "w") as f:
json.dump(message_text, f, indent=4)
await client.close()
return
# create log of conversation for client
message_text.append({"role": "user", "content": message.content,})
# send conversation to aclient, await response
completion = await AIclient.chat.completions.create(
model="deployment-name", # e.g. gpt-35-instant
messages=message_text
)
# add to log of conversation
message_text.append({"role": "assistant", "content" : completion.choices[0].message.content})
print(message_text)
# send aclient message to discord
print(completion.choices[0].message.content)
await message.channel.send(completion.choices[0].message.content)
# elif message.content.startswith('$typing'):
# channel = client.get_channel(1169885828265279508)
# await channel.typing()
# await asyncio.sleep(3)
# await channel.send('Done typing!')
# message_to_send = 'You said: "' + message.content + '"'
# await message.channel.send(message_to_send)
# if last_message == 0:
# await message.channel.send('this is the first message!')
# else :
# await message.channel.send(f'time since last message: {time.time() - last_message} seconds')
# last_message = time.time()
# @client.event
# async def on_typing(channel, user, when):
# # time_elapsed = time.time()-last_message
# await channel.send(f'{user} is typing.')
# # last_message = time.time()
client.run(token)
| [
"You are StrangerBot, an introduction chatbot. Your job is to interact with two different strangers, and help them get to know each other better. You can do this by recommending them topics to talk about or asking them to share more about themselves. You should start each conversation by introducing yourself, then by asking both people their name and some basic facts about them. You should not have overly lengthy or detailed responses, as the focus of the conversation should be between the two strangers and you want to keep a friendly, casual tone. "
] |
2024-01-10 | insarwxw/PySAR | pysar~modify_network.py | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
#
# Yunjun, Jul 2015: Update Coherence file
# Yunjun, Oct 2015: Add 'T' option for template file input
# and pysar.dropIfgIndex in template content
# Yunjun, Jan 2017: add spatial_average(), update_inps_from_template()
# modify_file_date12_list(), cmdLineParse()
# merge update_network() into this.
# add coherence-based network modification.
import os
import sys
import argparse
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pysar._datetime as ptime
import pysar._network as pnet
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar.subset as subset
from pysar._readfile import multi_group_hdf5_file, multi_dataset_hdf5_file, single_dataset_hdf5_file
########################### Sub Function #############################
def nearest_neighbor(x,y, x_array, y_array):
""" find nearest neighbour
Input:
x/y : float
x/y_array : numpy.array, temporal/perpendicular spatial baseline
Output:
idx : int, index of min distance - nearest neighbour
"""
dist = np.sqrt((x_array -x)**2 + (y_array -y)**2)
idx = np.argmin(dist)
#idx = dist==np.min(dist)
return idx
def reset_pairs(File):
    '''Reset/restore all pairs within the input file by setting all drop_ifgram=no'''
print "set drop_ifgram to 'no' for all interferograms for file: "+File
k = readfile.read_attribute(File)['FILE_TYPE']
h5 = h5py.File(File,'r+')
ifgram_list = sorted(h5[k].keys())
for ifgram in ifgram_list:
h5[k][ifgram].attrs['drop_ifgram'] = 'no'
h5.close()
return File
def manual_select_pairs_to_remove(File):
'''Manually select interferograms to remove'''
print '----------------------------------------------------------------------------'
print 'Manually select interferograms to remove'
print 'Click two dates - points - in the figure to select one pair of interferogram'
print 'repeat until you select all pairs you would like to remove'
print 'then close the figure to continue the program ...'
print '----------------------------------------------------------------------------'
# Display the network
fig = plt.figure()
ax = fig.add_subplot(111)
date12_orig = pnet.get_date12_list(File)
bperp_list = ut.perp_baseline_ifgram2timeseries(File)[0].tolist()
date8_list = ptime.ifgram_date_list(File)
ax = pnet.plot_network(ax, date12_orig, date8_list, bperp_list)
print 'display the network of interferogram of file: '+File
date6_list = ptime.yymmdd(date8_list)
dates_array = np.array(ptime.date_list2vector(date8_list)[0])
dateNum_array = mdates.date2num(dates_array)
bperp_array = np.array(bperp_list)
date_click = []
date12_click = []
def onclick(event):
xClick = event.xdata
yClick = event.ydata
idx = nearest_neighbor(xClick, yClick, dateNum_array, bperp_array)
date6 = date6_list[idx]
print 'click at '+date6
date_click.append(date6)
if len(date_click)%2 == 0 and date_click[-2] != date_click[-1]:
[m_date, s_date] = sorted(date_click[-2:])
m_idx = date6_list.index(m_date)
s_idx = date6_list.index(s_date)
date12 = m_date+'-'+s_date
if date12 in date12_orig:
print 'select date12: '+date12
date12_click.append(date12)
ax.plot([dateNum_array[m_idx],dateNum_array[s_idx]], [bperp_array[m_idx],bperp_array[s_idx]], 'r', lw=4)
else:
                    print date12+' does not exist in input file'
plt.draw()
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
return date12_click
def modify_file_date12_list(File, date12_to_rmv, mark_attribute=False, outFile=None):
'''Update multiple group hdf5 file using date12 to remove
Inputs:
File - multi_group HDF5 file, i.e. unwrapIfgram.h5, coherence.h5
date12_to_rmv - list of string indicating interferograms in YYMMDD-YYMMDD format
mark_attribute- bool, if True, change 'drop_ifgram' attribute only; otherwise, write
                        result to a new file
outFile - string, output file name
Output:
outFile - string, output file name, if mark_attribute=True, outFile = File
'''
k = readfile.read_attribute(File)['FILE_TYPE']
print '----------------------------------------------------------------------------'
print 'file: '+File
if mark_attribute:
print "set drop_ifgram to 'yes' for all interferograms to remove, and 'no' for all the others."
h5 = h5py.File(File,'r+')
ifgram_list = sorted(h5[k].keys())
for ifgram in ifgram_list:
if h5[k][ifgram].attrs['DATE12'] in date12_to_rmv:
h5[k][ifgram].attrs['drop_ifgram'] = 'yes'
else:
h5[k][ifgram].attrs['drop_ifgram'] = 'no'
h5.close()
outFile = File
else:
date12_orig = pnet.get_date12_list(File)
date12_to_write = sorted(list(set(date12_orig) - set(date12_to_rmv)))
print 'number of interferograms in file : '+str(len(date12_orig))
print 'number of interferograms to keep/write: '+str(len(date12_to_write))
print 'list of interferograms to keep/write: '
print date12_to_write
date12Num = len(date12_to_write)
if not outFile:
outFile = 'Modified_'+os.path.basename(File)
print 'writing >>> '+outFile
h5out = h5py.File(outFile, 'w')
gg = h5out.create_group(k)
h5 = h5py.File(File, 'r')
igramList = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(igramList)
prog_bar = ptime.progress_bar(maxValue=date12Num, prefix='writing: ')
for i in range(date12Num):
date12 = date12_to_write[i]
idx = date12_orig.index(date12)
igram = igramList[idx]
data = h5[k][igram].get(igram)[:]
group = gg.create_group(igram)
dset = group.create_dataset(igram, data=data, compression='gzip')
for key, value in h5[k][igram].attrs.iteritems():
group.attrs[key] = value
prog_bar.update(i+1, suffix=date12_list[i])
prog_bar.close()
h5.close()
h5out.close()
print 'finished writing >>> '+outFile
return outFile
def read_template2inps(template_file, inps=None):
'''Read input template options into Namespace inps'''
if not inps:
inps = cmdLineParse()
template = readfile.read_template(inps.template_file)
key_list = template.keys()
# Coherence-based network modification
prefix = 'pysar.network.'
key = prefix+'coherenceBased'
if key in key_list and template[key] in ['auto','yes']:
inps.coherence_based = True
key = prefix+'coherenceFile'
if key in key_list:
if template[key] == 'auto':
inps.coherence_file = 'coherence.h5'
else:
inps.coherence_file = template[key]
    # find coherence file from input files if inps.coherence_file does not exist.
if inps.coherence_based and not os.path.isfile(inps.coherence_file):
k_list = [readfile.read_attribute(f)['FILE_TYPE'] for f in inps.file]
        try:
            coh_file_idx = k_list.index('coherence')
            inps.coherence_file = inps.file[coh_file_idx]
        except ValueError:
            print 'No coherence file found! Can not use coherence-based method without it.'
            inps.coherence_based = False
key = prefix+'minCoherence'
if key in key_list:
if template[key] == 'auto':
inps.min_coherence = 0.7
else:
inps.min_coherence = float(template[key])
key = prefix+'maskFile'
if key in key_list:
value = template[key]
if value == 'auto':
inps.mask_file = 'mask.h5'
elif value == 'no':
inps.mask_file = None
else:
inps.mask_file = value
key = prefix+'maskAoi.yx'
if key in key_list:
value = template[key]
if value in ['auto','no']:
inps.aoi_pix_box = None
else:
tmp = [i.strip() for i in value.split(',')]
sub_y = sorted([int(i.strip()) for i in tmp[0].split(':')])
sub_x = sorted([int(i.strip()) for i in tmp[1].split(':')])
inps.aoi_pix_box = (sub_x[0], sub_y[0], sub_x[1], sub_y[1])
key = prefix+'maskAoi.lalo'
if key in key_list:
value = template[key]
if value in ['auto','no']:
inps.aoi_geo_box = None
else:
tmp = [i.strip() for i in value.split(',')]
sub_lat = sorted([float(i.strip()) for i in tmp[0].split(':')])
sub_lon = sorted([float(i.strip()) for i in tmp[1].split(':')])
inps.aoi_geo_box = (sub_lon[0], sub_lat[1], sub_lon[1], sub_lat[0])
# Check trans file
try:
inps.trans_file = ut.get_file_list(inps.trans_file)[0]
except:
inps.trans_file = None
print 'Warning: no mapping transformation file found! Can not use '+key+' option without it.'
print 'skip this option.'
inps.aoi_pix_box = None
## Network Modification based on thresholds
key = prefix+'tempBaseMax'
if key in key_list:
value = template[key]
if value not in ['auto','no']:
inps.max_temp_baseline = float(value)
key = prefix+'perpBaseMax'
if key in key_list:
value = template[key]
if value not in ['auto','no']:
inps.max_perp_baseline = float(value)
key = prefix+'referenceFile'
if key in key_list:
value = template[key]
if value in ['auto','no']:
inps.reference_file = None
else:
inps.reference_file = value
key = prefix+'excludeDate'
if key in key_list:
value = template[key]
if value not in ['auto','no']:
inps.exclude_date = [i for i in value.replace(',',' ').split()]
key = prefix+'excludeIfgIndex'
if key in key_list:
value = template[key]
if value not in ['auto','no']:
inps.exclude_ifg_index = [i for i in value.replace(',',' ').split()]
return inps
############################### Usage ################################
EXAMPLE='''example:
modify_network.py unwrapIfgram.h5 coherence.h5 --template pysarApp_template.txt --trans geomap_4rlks.trans
modify_network.py unwrapIfgram.h5 coherence.h5 -t 365 -b 200
modify_network.py unwrapIfgram.h5 coherence.h5 --coherence-base coherence.h5 --mask Mask.h5 --min-coherence 0.7
modify_network.py unwrapIfgram.h5 -r Modified_coherence.h5
modify_network.py unwrapIfgram.h5 --start-date 20080520 --end-date 20110101
modify_network.py unwrapIfgram.h5 --exclude-date 20080520 20090816
modify_network.py unwrapIfgram.h5 --exclude-ifg-index 3:9 11 23
modify_network.py unwrapIfgram.h5 --manual
'''
TEMPLATE='''
pysar.network.coherenceBased = yes #[yes / no], auto for yes
pysar.network.coherenceFile = auto #[filename], auto for coherence.h5
pysar.network.minCoherence = auto #[0.0-1.0], auto for 0.7
pysar.network.maskFile = auto #[file name, no], auto for mask.h5, no for all pixels
pysar.network.maskAoi.yx = no #[y0:y1,x0:x1 / no], area of interest for coherence calculation, auto for no
pysar.network.maskAoi.lalo = no #[lat0:lat1,lon0:lon1 / no], similar to maskAoi.yx but in lat/lon, auto for no
pysar.network.tempBaseMax = 36500 #[1-inf], day, maximum temporal baseline, auto for 3.65e4
pysar.network.perpBaseMax = 10e3 #[1-inf], meter, maximum perpendicular spatial baseline, auto for 10e3
pysar.network.referenceFile = no #[date12_list.txt / Modified_unwrapIfgram.h5 / no], auto for no
pysar.network.excludeDate = no #[20080520,20090817 / no], auto for no
pysar.network.excludeIfgIndex = no #[1,3,25 / no], list of interferogram number starting from 1, auto for no
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Modify the network of interferograms',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=EXAMPLE)
parser.add_argument('file', nargs='+',\
help='Files to modify/drop network.\n'\
'i.e. unwrapIfgram.h5, wrapIfgram.h5, coherence.h5, ...')
parser.add_argument('--reset', action='store_true',\
help='restore all interferograms existed in the file, by marking all drop_ifgram=no')
parser.add_argument('--write-file', dest='mark_attribute', action='store_false',\
                        help='write a new file excluding the dropped interferograms, instead of only marking the drop_ifgram attribute')
parser.add_argument('--plot', action='store_true', help='plot and save the result to image files.')
parser.add_argument('-t', dest='max_temp_baseline', type=float, help='temporal baseline threshold/maximum in days')
parser.add_argument('-b', dest='max_perp_baseline', type=float, help='perpendicular baseline threshold/maximum in meters')
parser.add_argument('-r','--reference', dest='reference_file',\
help='Reference hdf5 / list file with network information.\n'\
'i.e. Modified_unwrapIfgram.h5, Pairs.list')
parser.add_argument('--template', dest='template_file', help='Template file with input options:\n'+TEMPLATE+'\n')
parser.add_argument('--exclude-ifg-index', dest='exclude_ifg_index', nargs='*',\
help='index of interferograms to remove/drop.\n1 as the first')
parser.add_argument('--exclude-date', dest='exclude_date', nargs='*',\
help='date(s) to remove/drop, all interferograms included date(s) will be removed')
parser.add_argument('--start-date','--min-date', dest='start_date',\
help='remove/drop interferograms with date earlier than start-date in YYMMDD or YYYYMMDD format')
parser.add_argument('--end-date','--max-date', dest='end_date',\
help='remove/drop interferograms with date later than end-date in YYMMDD or YYYYMMDD format')
# Coherence-based network
cohBased = parser.add_argument_group('Coherence-based Network',\
'Drop/modify network based on spatial coherence')
cohBased.add_argument('--coherence-based', dest='coherence_based', action='store_true',\
help='Enable coherence-based network modification')
cohBased.add_argument('--coherence', dest='coherence_file', default='coherence.h5',\
help='Coherence file used to calculate average value for each interferograms\n'+\
'Input coherence file should have the same network as input file(s)\n'+\
'default: coherence.h5')
cohBased.add_argument('--mask', dest='mask_file',\
help='Mask file used to calculate the spatial coherence\n'\
'Will use the whole area if not assigned')
cohBased.add_argument('--min-coherence', dest='min_coherence', type=float, default=0.7,\
help='Minimum coherence value')
cohBased.add_argument('--trans', dest='trans_file', default='geomap*.trans',\
help='mapping transformation file for geo/radar coordinate conversion.\n'+\
'Needed for mask AOI in lalo')
# Manually select network
manual = parser.add_argument_group('Manual Network', 'Manually select/drop/modify network')
manual.add_argument('--manual', dest='disp_network', action='store_true',\
help='display network to manually choose line/interferogram to remove')
inps = parser.parse_args()
inps.aoi_geo_box = None
inps.aoi_pix_box = None
return inps
######################### Main Function ##############################
def main(argv):
##### Read Inputs
inps = cmdLineParse()
inps.file = ut.get_file_list(inps.file)
date12_orig = pnet.get_date12_list(inps.file[0])
print 'input file(s) to be modified: '+str(inps.file)
print 'number of interferograms: '+str(len(date12_orig))
atr = readfile.read_attribute(inps.file[0])
#print '\n****************** Network Modification ********************'
if inps.reset:
print '----------------------------------------------------------------------------'
for file in inps.file:
reset_pairs(file)
return
# Update inps if template is input
if inps.template_file:
inps = read_template2inps(inps.template_file, inps)
if all(not i for i in [inps.reference_file, inps.max_temp_baseline, inps.max_perp_baseline,\
inps.exclude_ifg_index, inps.exclude_date, inps.coherence_based, inps.start_date, inps.end_date]):
# Display network for manually modification when there is no other modification input.
print 'No input option found to remove interferogram'
print 'To manually modify network, please use --manual option '
return
    # Convert colon-separated index input (e.g. 3:9) into a continuous list of indices
if inps.exclude_ifg_index:
ifg_index = list(inps.exclude_ifg_index)
inps.exclude_ifg_index = []
for index in ifg_index:
index_temp = [int(i) for i in index.split(':')]
index_temp.sort()
if len(index_temp)==2:
for j in range(index_temp[0], index_temp[1]+1):
inps.exclude_ifg_index.append(j)
elif len(index_temp)==1:
inps.exclude_ifg_index.append(int(index))
else:
                print 'Unrecognized input: '+index
inps.exclude_ifg_index = sorted(inps.exclude_ifg_index)
if max(inps.exclude_ifg_index) > len(date12_orig):
raise Exception('Input index out of range!\n'+\
'input index:'+str(inps.exclude_ifg_index)+'\n'+\
'index range of file: '+str(len(date12_orig)))
##### Get date12_to_rmv
date12_to_rmv = []
# 1. Update date12_to_rmv from reference file
if inps.reference_file:
date12_to_keep = pnet.get_date12_list(inps.reference_file)
print '----------------------------------------------------------------------------'
print 'use reference pairs info from file: '+inps.reference_file
print 'number of interferograms in reference: '+str(len(date12_to_keep))
print 'date12 not in reference file:'
for date12 in date12_orig:
if date12 not in date12_to_keep:
date12_to_rmv.append(date12)
print date12
# 2.1 Update date12_to_rmv from coherence file
if inps.coherence_based and os.path.isfile(inps.coherence_file):
print '----------------------------------------------------------------------------'
print 'use coherence-based network modification from coherence file: '+inps.coherence_file
# check mask AOI in lalo
if inps.aoi_geo_box and inps.trans_file:
print 'input AOI in (lon0, lat1, lon1, lat0): '+str(inps.aoi_geo_box)
inps.aoi_pix_box = subset.bbox_geo2radar(inps.aoi_geo_box, atr, inps.trans_file)
if inps.aoi_pix_box:
print 'input AOI in (x0,y0,x1,y1): '+str(inps.aoi_pix_box)
# Calculate spatial average coherence
coh_list, coh_date12_list = ut.get_spatial_average(inps.coherence_file, inps.mask_file,\
inps.aoi_pix_box, saveList=True)
# MST network
print 'Get minimum spanning tree (MST) of interferograms with inverse of coherence.'
mst_date12_list = pnet.threshold_coherence_based_mst(coh_date12_list, coh_list)
print 'date12 with average coherence < '+str(inps.min_coherence)+' and not in MST: '
for i in range(len(coh_date12_list)):
date12 = coh_date12_list[i]
if coh_list[i] < inps.min_coherence and date12 not in mst_date12_list:
date12_to_rmv.append(date12)
print date12
# 2.2 Update date12_to_rmv from perp baseline threshold
if inps.max_perp_baseline:
print '----------------------------------------------------------------------------'
print 'Drop pairs with perpendicular spatial baseline > '+str(inps.max_perp_baseline)+' meters'
ifg_bperp_list = pnet.igram_perp_baseline_list(inps.file[0])
for i in range(len(ifg_bperp_list)):
if ifg_bperp_list[i] > inps.max_perp_baseline:
date12 = date12_orig[i]
date12_to_rmv.append(date12)
print date12
# 2.3 Update date12_to_rmv from temp baseline threshold
if inps.max_temp_baseline:
print '----------------------------------------------------------------------------'
print 'Drop pairs with temporal baseline > '+str(inps.max_temp_baseline)+' days'
date8_list = ptime.ifgram_date_list(inps.file[0])
date6_list = ptime.yymmdd(date8_list)
tbase_list = ptime.date_list2tbase(date8_list)[0]
for i in range(len(date12_orig)):
date1, date2 = date12_orig[i].split('-')
idx1 = date6_list.index(date1)
idx2 = date6_list.index(date2)
t_diff = tbase_list[idx2] - tbase_list[idx1]
if t_diff > inps.max_temp_baseline:
date12 = date12_orig[i]
date12_to_rmv.append(date12)
print date12
# 2.4 Update date12_to_rmv from exclude_ifg_index
if inps.exclude_ifg_index:
print '----------------------------------------------------------------------------'
print 'drop date12/pair with the following index number:'
for index in inps.exclude_ifg_index:
date12 = date12_orig[index-1]
date12_to_rmv.append(date12)
print str(index)+' '+date12
# 2.5 Update date12_to_rmv from exclude_date
if inps.exclude_date:
inps.exclude_date = ptime.yymmdd(inps.exclude_date)
print '----------------------------------------------------------------------------'
print 'Drop pairs including the following dates: \n'+str(inps.exclude_date)
for i in range(len(date12_orig)):
date1, date2 = date12_orig[i].split('-')
if (date1 in inps.exclude_date) or (date2 in inps.exclude_date):
date12 = date12_orig[i]
date12_to_rmv.append(date12)
print date12
# 2.6 Update date12_to_rmv from start_date
if inps.start_date:
inps.start_date = ptime.yymmdd(inps.start_date)
print '----------------------------------------------------------------------------'
print 'Drop pairs with date earlier than start-date: '+inps.start_date
min_date = int(ptime.yyyymmdd(inps.start_date))
for i in range(len(date12_orig)):
date12 = date12_orig[i]
if any(int(j) < min_date for j in ptime.yyyymmdd(date12.split('-'))):
date12_to_rmv.append(date12)
print date12
# 2.7 Update date12_to_rmv from end_date
if inps.end_date:
inps.end_date = ptime.yymmdd(inps.end_date)
print '----------------------------------------------------------------------------'
        print 'Drop pairs with date later than end-date: '+inps.end_date
max_date = int(ptime.yyyymmdd(inps.end_date))
for i in range(len(date12_orig)):
date12 = date12_orig[i]
if any(int(j) > max_date for j in ptime.yyyymmdd(date12.split('-'))):
date12_to_rmv.append(date12)
print date12
# 3. Manually drop pairs
if inps.disp_network:
date12_click = manual_select_pairs_to_remove(inps.file[0])
for date12 in list(date12_click):
if date12 not in date12_orig:
date12_click.remove(date12)
print 'date12 selected to remove:'
print date12_click
date12_to_rmv += date12_click
# 4. drop duplicate date12 and sort in order
date12_to_rmv = list(set(date12_to_rmv))
date12_to_rmv = sorted(date12_to_rmv)
print '----------------------------------------------------------------------------'
print 'number of interferograms to remove: '+str(len(date12_to_rmv))
print 'list of interferograms to remove:'
print date12_to_rmv
# Check existing mark for --mark-attribute option
if inps.mark_attribute:
# Get list of date12 of interferograms already been marked.
k = readfile.read_attribute(inps.file[0])['FILE_TYPE']
h5 = h5py.File(inps.file[0], 'r')
ifgram_list_all = sorted(h5[k].keys())
ifgram_list_keep = ut.check_drop_ifgram(h5, atr, ifgram_list_all, print_message=False)
ifgram_list_dropped = sorted(list(set(ifgram_list_all) - set(ifgram_list_keep)))
date12_list_dropped = ptime.list_ifgram2date12(ifgram_list_dropped)
h5.close()
if date12_to_rmv == date12_list_dropped:
date12_to_rmv = []
            print 'calculated date12 to drop is the same as existing marked input file, set it empty.'
if date12_to_rmv:
##### Update Input Files with date12_to_rmv
Modified_CoherenceFile = 'Modified_coherence.h5'
for File in inps.file:
Modified_File = modify_file_date12_list(File, date12_to_rmv, inps.mark_attribute)
k = readfile.read_attribute(File)['FILE_TYPE']
# Update Mask File
if k == 'interferograms':
print 'update mask file for input '+k+' file based on '+Modified_File
inps.mask_file = 'mask.h5'
print 'writing >>> '+inps.mask_file
ut.nonzero_mask(Modified_File, inps.mask_file)
elif k == 'coherence':
print 'update average spatial coherence for input '+k+' file based on: '+Modified_File
outFile = 'averageSpatialCoherence.h5'
print 'writing >>> '+outFile
ut.temporal_average(Modified_File, outFile)
Modified_CoherenceFile = Modified_File
# Plot result
if inps.plot:
print '\nplot modified network and save to file.'
plotCmd = 'plot_network.py '+Modified_File+' --coherence '+Modified_CoherenceFile+' --nodisplay'
if inps.mask_file:
plotCmd += ' --mask '+inps.mask_file
print plotCmd
os.system(plotCmd)
print 'Done.'
return
else:
print 'No interferogram dropped, skip update.'
return
########################################################################
if __name__ == '__main__':
main(sys.argv[1:])
| [
"\npysar.network.coherenceBased = yes #[yes / no], auto for yes\npysar.network.coherenceFile = auto #[filename], auto for coherence.h5\npysar.network.minCoherence = auto #[0.0-1.0], auto for 0.7\npysar.network.maskFile = auto #[file name, no], auto for mask.h5, no for all pixels\npysar.network.maskAoi.yx = no #[y0:y1,x0:x1 / no], area of interest for coherence calculation, auto for no\npysar.network.maskAoi.lalo = no #[lat0:lat1,lon0:lon1 / no], similar to maskAoi.yx but in lat/lon, auto for no\n\npysar.network.tempBaseMax = 36500 #[1-inf], day, maximum temporal baseline, auto for 3.65e4\npysar.network.perpBaseMax = 10e3 #[1-inf], meter, maximum perpendicular spatial baseline, auto for 10e3\npysar.network.referenceFile = no #[date12_list.txt / Modified_unwrapIfgram.h5 / no], auto for no\npysar.network.excludeDate = no #[20080520,20090817 / no], auto for no\npysar.network.excludeIfgIndex = no #[1,3,25 / no], list of interferogram number starting from 1, auto for no\n"
] |
2024-01-10 | esgraham/azure-sdk-for-python | sdk~ai~azure-ai-resources~azure~ai~resources~_index~_mlindex.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""MLIndex class for interacting with MLIndex assets."""
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, Iterator, Optional, Union
import yaml
from azure.core.credentials import TokenCredential
from azure.ai.resources._index._documents import Document
from azure.ai.resources._index._embeddings import EmbeddingsContainer
from azure.ai.resources._index._utils.connections import (
get_connection_credential,
get_connection_by_id_v2,
get_target_from_connection,
)
from azure.ai.resources._index._utils.logging import (
get_logger,
langchain_version,
packages_versions_for_compatibility,
track_activity,
version,
)
try:
from langchain.document_loaders.base import BaseLoader
except ImportError:
BaseLoader = Iterator[Document]
logger = get_logger("mlindex")
class MLIndex:
"""MLIndex class for interacting with MLIndex assets."""
INDEX_FIELD_MAPPING_KEY = "field_mapping"
INDEX_FIELD_MAPPING_TYPES = {
"content": "Raw data content of indexed document",
"embedding": "Embedding of indexed document content",
"metadata": "Metadata of indexed document, must be a JSON string",
"filename": "Filename of indexed document, relative to data source root",
"title": "Title of indexed document",
"url": "User facing citation URL for indexed document",
}
base_uri: str
index_config: dict
embeddings_config: dict
def __init__(
self,
uri: Optional[Union[str, Path, object]] = None,
mlindex_config: Optional[dict] = None
):
"""
Initialize MLIndex from a URI or AzureML Data Asset.
Args:
----
uri: URI to MLIndex asset folder (remote or local)
mlindex_config: MLIndex config dictionary
credential: Credential to use for talking to Azure resources
"""
with track_activity(logger, "MLIndex.__init__") as activity_logger:
if uri is not None:
if isinstance(uri, str):
uri = str(uri)
elif isinstance(uri, Path):
uri = str(uri)
else:
# Assume given AzureML Data Asset
uri = uri.path
try:
import fsspec
except ImportError:
raise ValueError(
"Could not import fsspec python package. "
"Please install it with `pip install fsspec`."
)
try:
import azureml.fsspec
# TODO: Patch azureml.dataprep auth logic to use given credential for loading MLIndex
except ImportError:
logger.warning(
"Could not import azureml-fsspec python package. "
"Loading from remote filesystems supported by AzureML will not work. "
"Please install it with `pip install azureml-fsspec`."
)
self.base_uri = uri
mlindex_config = None
uri = uri.rstrip("/")
mlindex_uri = f"{uri}/MLIndex" if not uri.endswith("MLIndex") else uri
try:
mlindex_file = fsspec.open(mlindex_uri, "r")
if hasattr(mlindex_file.fs, "_path"):
# File on azureml filesystem has path relative to container root so need to get underlying fs path
self.base_uri = mlindex_file.fs._path.split("/MLIndex")[0]
else:
self.base_uri = mlindex_file.path.split("/MLIndex")[0]
with mlindex_file as f:
mlindex_config = yaml.safe_load(f)
except Exception as e:
raise ValueError(f"Could not find MLIndex: {e}") from e
elif mlindex_config is None:
raise ValueError("Must provide either uri or mlindex_config")
self.index_config = mlindex_config.get("index", {})
if self.index_config is None:
raise ValueError("Could not find index config in MLIndex yaml")
activity_logger.activity_info["index_kind"] = self.index_config.get("kind", "none")
self.embeddings_config = mlindex_config.get("embeddings", {})
if self.embeddings_config is None:
raise ValueError("Could not find embeddings config in MLIndex yaml")
activity_logger.activity_info["embeddings_kind"] = self.embeddings_config.get("kind", "none")
activity_logger.activity_info["embeddings_api_type"] = self.embeddings_config.get("api_type", "none")
@property
def name(self) -> str:
"""Returns the name of the MLIndex."""
return self.index_config.get("name", "")
@name.setter
def name(self, value: str):
"""Sets the name of the MLIndex."""
self.index_config["name"] = value
@property
def description(self) -> str:
"""Returns the description of the MLIndex."""
return self.index_config.get("description", "")
@description.setter
def description(self, value: str):
"""Sets the description of the MLIndex."""
self.index_config["description"] = value
def get_langchain_embeddings(self, credential: Optional[TokenCredential] = None):
"""Get the LangChainEmbeddings from the MLIndex."""
embeddings = EmbeddingsContainer.from_metadata(self.embeddings_config.copy())
return embeddings.as_langchain_embeddings(credential=credential)
def as_langchain_vectorstore(self, credential: Optional[TokenCredential] = None):
"""Converts MLIndex to a retriever object that can be used with langchain, may download files."""
with track_activity(logger, "MLIndex.as_langchain_vectorstore") as activity_logger:
index_kind = self.index_config.get("kind", "none")
activity_logger.activity_info["index_kind"] = index_kind
activity_logger.activity_info["embeddings_kind"] = self.embeddings_config.get("kind", "none")
activity_logger.activity_info["embeddings_api_type"] = self.embeddings_config.get("api_type", "none")
if index_kind == "acs":
from azure.ai.resources._index._indexes.azure_search import import_azure_search_or_so_help_me
import_azure_search_or_so_help_me()
if self.index_config.get("field_mapping", {}).get("embedding", None) is None:
raise ValueError("field_mapping.embedding must be set in MLIndex config for acs index, try `.as_langchain_retriever()` instead.")
try:
connection_credential = get_connection_credential(self.index_config, credential=credential)
except Exception as e:
# azure.ai.resources has workflow where env vars are set before doing stuff.
if "AZURE_AI_SEARCH_KEY" in os.environ or "AZURE_COGNITIVE_SEARCH_KEY" in os.environ:
from azure.core.credentials import AzureKeyCredential
logger.warning(f"Failed to get credential for ACS with {e}, falling back to env vars.")
connection_credential = AzureKeyCredential(os.environ["AZURE_AI_SEARCH_KEY"] if "AZURE_AI_SEARCH_KEY" in os.environ else os.environ["AZURE_COGNITIVE_SEARCH_KEY"])
else:
raise e
azure_search_documents_version = packages_versions_for_compatibility["azure-search-documents"]
if (azure_search_documents_version > "11.4.0b6" and langchain_version > "0.0.273") or (azure_search_documents_version == "11.4.0b6" and langchain_version < "0.0.273" and langchain_version >= "0.0.198"):
from langchain.vectorstores import azuresearch
# TODO: These fields effect all ACS retrievers in the same process, should change class so it can
# use these as defaults but uses names passed in as args preferentially
azuresearch.FIELDS_ID = self.index_config.get("field_mapping", {}).get("id", "id")
azuresearch.FIELDS_CONTENT = self.index_config.get("field_mapping", {}).get("content", "content")
azuresearch.FIELDS_CONTENT_VECTOR = self.index_config.get("field_mapping", {}).get("embedding", "content_vector_open_ai")
azuresearch.FIELDS_METADATA = self.index_config.get("field_mapping", {}).get("metadata", "meta_json_string")
from azure.core.credentials import AzureKeyCredential
from langchain.vectorstores.azuresearch import AzureSearch
return AzureSearch(
azure_search_endpoint=self.index_config.get(
"endpoint",
get_target_from_connection(
get_connection_by_id_v2(
self.index_config["connection"]["id"],
credential=credential
)
)
),
azure_search_key=connection_credential.key if isinstance(connection_credential, AzureKeyCredential) else None,
index_name=self.index_config.get("index"),
embedding_function=self.get_langchain_embeddings(credential=credential).embed_query,
search_type="hybrid",
semantic_configuration_name=self.index_config.get("semantic_configuration_name", "azureml-default"),
user_agent=f"azureml-rag=={version}/mlindex,langchain=={langchain_version}",
)
else:
from azure.ai.resources._index._langchain.acs import AzureCognitiveSearchVectorStore
logger.warning(f"azure-search-documents=={azure_search_documents_version} not compatible langchain.vectorstores.azuresearch yet, using REST client based VectorStore.")
return AzureCognitiveSearchVectorStore(
index_name=self.index_config.get("index"),
endpoint=self.index_config.get(
"endpoint",
get_target_from_connection(
get_connection_by_id_v2(
self.index_config["connection"]["id"],
credential=credential
)
)
),
embeddings=self.get_langchain_embeddings(credential=credential),
field_mapping=self.index_config.get("field_mapping", {}),
credential=connection_credential,
)
elif index_kind == "faiss":
from fsspec.core import url_to_fs
store = None
engine = self.index_config.get("engine")
if engine == "langchain.vectorstores.FAISS":
from azure.ai.resources._index._langchain.vendor.vectorstores.faiss import FAISS
embeddings = EmbeddingsContainer.from_metadata(self.embeddings_config.copy()).as_langchain_embeddings(credential=credential)
fs, uri = url_to_fs(self.base_uri)
with tempfile.TemporaryDirectory() as tmpdir:
fs.download(f"{uri.rstrip('/')}/index.pkl", f"{str(tmpdir)}")
fs.download(f"{uri.rstrip('/')}/index.faiss", f"{str(tmpdir)}")
store = FAISS.load_local(str(tmpdir), embeddings)
elif engine.endswith("indexes.faiss.FaissAndDocStore"):
from azure.ai.resources._index._indexes.faiss import FaissAndDocStore
error_fmt_str = """Failed to import langchain faiss bridge module with: {e}\n"
This could be due to an incompatible change in langchain since this bridge was implemented.
If you understand what has changed you could implement your own wrapper of azure.ai.tools.mlindex._indexes.faiss.FaissAndDocStore.
"""
try:
from azure.ai.resources._index._langchain.faiss import azureml_faiss_as_langchain_faiss
except Exception as e:
logger.warning(error_fmt_str.format(e=e))
azureml_faiss_as_langchain_faiss = None
embeddings = EmbeddingsContainer.from_metadata(self.embeddings_config.copy()).as_langchain_embeddings(credential=credential)
store = FaissAndDocStore.load(self.base_uri, embeddings.embed_query)
if azureml_faiss_as_langchain_faiss is not None:
try:
store = azureml_faiss_as_langchain_faiss(FaissAndDocStore.load(self.base_uri, embeddings.embed_query))
except Exception as e:
logger.error(error_fmt_str.format(e=e))
raise
else:
raise ValueError(f"Unknown engine: {engine}")
return store
else:
raise ValueError(f"Unknown index kind: {index_kind}")
def as_langchain_retriever(self, credential: Optional[TokenCredential] = None, **kwargs):
"""Converts MLIndex to a retriever object that can be used with langchain, may download files."""
index_kind = self.index_config.get("kind", None)
if index_kind == "acs":
if self.index_config.get("field_mapping", {}).get("embedding", None) is None:
from azure.ai.resources._index._langchain.acs import AzureCognitiveSearchVectorStore
connection_credential = get_connection_credential(self.index_config, credential=credential)
return AzureCognitiveSearchVectorStore(
index_name=self.index_config.get("index"),
endpoint=self.index_config.get(
"endpoint",
get_target_from_connection(
get_connection_by_id_v2(
self.index_config["connection"]["id"],
credential=credential
)
)
),
embeddings=self.get_langchain_embeddings(credential=credential),
field_mapping=self.index_config.get("field_mapping", {}),
credential=connection_credential,
).as_retriever(**kwargs)
return self.as_langchain_vectorstore(credential=credential).as_retriever(**kwargs)
elif index_kind == "faiss":
return self.as_langchain_vectorstore(credential=credential).as_retriever(*kwargs)
else:
raise ValueError(f"Unknown index kind: {index_kind}")
def __repr__(self):
"""Returns a string representation of the MLIndex object."""
return yaml.dump({
"index": self.index_config,
"embeddings": self.embeddings_config,
})
def save(self, output_uri: Optional[str], just_config: bool = False):
"""
Save the MLIndex to a uri.
Will use uri MLIndex was loaded from if `output_uri` not set.
"""
# Use fsspec to create MLIndex yaml file at output_uri and call save on _underlying_index if present
try:
import fsspec
mlindex_file = fsspec.open(f"{output_uri.rstrip('/')}/MLIndex", "w")
# parse yaml to dict
with mlindex_file as f:
yaml.safe_dump({
"embeddings": self.embeddings_config,
"index": self.index_config
}, f)
if not just_config:
files = fsspec.open_files(f"{self.base_uri}/*")
files += fsspec.open_files(f"{self.base_uri}/**/*")
for file in files:
if file.path.endswith("MLIndex"):
continue
with file.open() as src:
with fsspec.open(f"{output_uri.rstrip('/')}/{file.path.replace(self.base_uri, '').lstrip('/')}", "wb") as dest:
dest.write(src.read())
except Exception as e:
raise ValueError(f"Could not save MLIndex: {e}") from e
| [
"Raw data content of indexed document"
] |
2024-01-10 | esgraham/azure-sdk-for-python | sdk~ai~azure-ai-resources~azure~ai~resources~_index~_models.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Language model classes."""
import copy
import json
import os
from typing import Optional
from azure.core.credentials import TokenCredential
from azure.ai.resources._index._utils.connections import (
connection_to_credential,
get_connection_by_id_v2,
get_connection_credential,
)
from azure.ai.resources._index._utils.logging import get_logger
logger = get_logger(__name__)
def parse_model_uri(uri: str, **kwargs) -> dict:
"""Parse a model URI into a dictionary of configuration parameters."""
scheme, details = uri.split("://")
def split_details(details):
details = details.split("/")
dets = {}
for i in range(0, len(details), 2):
dets[details[i]] = details[i + 1]
return dets
config = {**kwargs}
if scheme == "azure_open_ai":
config = {**split_details(details), **config}
config["kind"] = "open_ai"
if "endpoint" in config:
if ".openai." in config["endpoint"] or ".api.cognitive." in config["endpoint"]:
config["api_base"] = config["endpoint"].rstrip("/")
else:
config["api_base"] = f"https://{config['endpoint']}.openai.azure.com"
config["api_type"] = "azure"
config["api_version"] = kwargs.get("api_version") if kwargs.get("api_version") is not None else "2023-03-15-preview"
# Azure OpenAI has a batch_size limit of 16
if "batch_size" not in config:
config["batch_size"] = "16"
elif scheme == "open_ai":
config["kind"] = "open_ai"
config = {**split_details(details), **config}
config["api_type"] = "open_ai"
elif scheme == "hugging_face":
config["kind"] = "hugging_face"
config["model"] = details.split("model/")[1]
elif scheme == "none":
config["kind"] = "none"
else:
raise ValueError(f"Unknown model kind: {scheme}")
return config
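# Illustrative example (editor's sketch; endpoint and deployment names are made up):
#     parse_model_uri("azure_open_ai://endpoint/my-aoai/deployment/text-embedding-ada-002")
# returns roughly:
#     {"endpoint": "my-aoai", "deployment": "text-embedding-ada-002", "kind": "open_ai",
#      "api_base": "https://my-aoai.openai.azure.com", "api_type": "azure",
#      "api_version": "2023-03-15-preview", "batch_size": "16"}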
def init_open_ai_from_config(config: dict, credential: Optional[TokenCredential]) -> dict:
"""Initialize an OpenAI model from a configuration dictionary."""
import openai
logger.debug("OpenAI arguments: \n")
logger.debug("\n".join(f"{k}={v}" if k != "key" and k != "api_key" else f"{k}=[REDACTED]" for k, v in config.items()))
try:
if config.get("key") is not None:
config["api_key"] = config.get("key")
elif "connection_type" not in config:
if config.get("api_key") is None:
config["api_key"] = os.environ.get("OPENAI_API_KEY", None)
if config["api_key"] is None and "azure" in config["api_type"]:
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential(process_timeout=60) if credential is None else credential
config["api_key"] = credential.get_token("https://cognitiveservices.azure.com/.default").token
config["api_type"] = "azure_ad"
else:
if config["connection_type"] == "workspace_connection":
connection_id = config.get("connection", {}).get("id", "")
connection = get_connection_by_id_v2(connection_id, credential=credential)
# Only change base, version, and type in AOAI case
if hasattr(connection, "type"):
if connection.type == "azure_open_ai":
config["api_base"] = connection.target
connection_metadata = connection.metadata
config["api_version"] = connection.metadata.get("apiVersion", connection_metadata.get("ApiVersion", "2023-07-01-preview"))
config["api_type"] = connection.metadata.get("apiType", connection_metadata.get("ApiType", "azure")).lower()
elif isinstance(connection, dict) and connection.get("properties", {}).get("category", None) == "AzureOpenAI":
config["api_base"] = connection.get("properties", {}).get("target")
connection_metadata = connection.get("properties", {}).get("metadata", {})
config["api_version"] = connection_metadata.get("apiVersion", connection_metadata.get("ApiVersion", "2023-03-15-preview"))
config["api_type"] = connection_metadata.get("apiType", connection_metadata.get("ApiType", "azure")).lower()
if config["api_type"] == "azure_ad" or config["api_type"] == "azuread":
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential(process_timeout=60) if credential is None else credential
else:
credential = connection_to_credential(connection)
else:
credential = get_connection_credential(config)
if not hasattr(credential, "key"):
# Add hack to check for "BAKER-OPENAI-API-KEY"
if config.get("connection_type", "workspace_keyvault") == "workspace_keyvault":
new_args = copy.deepcopy(config)
new_args["connection"]["key"] = "BAKER-OPENAI-API-KEY"
credential = get_connection_credential(new_args)
if hasattr(credential, "key"):
config["api_key"] = credential.key
else:
config["api_key"] = credential.get_token("https://cognitiveservices.azure.com/.default").token
config["api_type"] = "azure_ad"
except Exception as e:
if "OPENAI_API_KEY" in os.environ:
logger.warning(f"Failed to get credential for ACS with {e}, falling back to env vars.")
config["api_key"] = os.environ["OPENAI_API_KEY"]
config["api_type"] = os.environ.get("OPENAI_API_TYPE", "azure")
config["api_base"] = os.environ.get("OPENAI_API_BASE", openai.api_base)
config["api_version"] = os.environ.get("OPENAI_API_VERSION", openai.api_version)
else:
raise e
if "azure" in openai.api_type:
config["api_version"] = config.get("api_version", "2023-03-15-preview")
return config | [] |
2024-01-10 | Siyuan-Harry/OmniTutor | try_my_app.py | import pandas as pd
import numpy as np
import faiss
from openai import OpenAI
import tempfile
from PyPDF2 import PdfReader
import io
from sentence_transformers import SentenceTransformer
import streamlit as st
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from collections import Counter
#from langdetect import detect
#import jieba
#import jieba.analyse
import nltk
@st.cache_data
def download_nltk():
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
def pdf_parser(input_pdf):
pdf = PdfReader(input_pdf)
pdf_content = ""
for page in pdf.pages:
pdf_content += page.extract_text()
return pdf_content
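# Illustrative sketch (hypothetical file name): extracting the plain text of a local PDF with
# the helper above; the real app passes an uploaded file object instead.
def _example_pdf_text():
    with open("lecture_notes.pdf", "rb") as f:
        return pdf_parser(f)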
def get_keywords(file_paths): # the key point here: make the summarization of each file as concise and comprehensive as possible
download_nltk()
keywords_list = []
for file_path in file_paths:
with open(file_path, 'r') as file:
data = file.read()
# tokenize
words = word_tokenize(data)
# remove punctuation
words = [word for word in words if word.isalnum()]
# remove stopwords
stop_words = set(stopwords.words('english'))
words = [word for word in words if word not in stop_words]
# lemmatization
lemmatizer = WordNetLemmatizer()
words = [lemmatizer.lemmatize(word) for word in words]
# count word frequencies
word_freq = Counter(words)
# get top 20 most common words
keywords = word_freq.most_common(20)
new_keywords = []
for word in keywords:
new_keywords.append(word[0])
str_keywords = ''
for word in new_keywords:
str_keywords += word + ", "
keywords_list.append(f"Top20 frequency keywords for {file_path}: {str_keywords}")
return keywords_list
def get_completion_from_messages(client, messages, model="gpt-4-1106-preview", temperature=0):
client = client
completion = client.chat.completions.create(
model=model,
messages=messages,
temperature=temperature,
)
return completion.choices[0].message.content
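# Illustrative sketch (hypothetical prompt): a minimal call to the completion helper above.
# `client` is assumed to be an already-constructed OpenAI client with a valid API key.
def _example_completion(client):
    messages = [
        {'role': 'system', 'content': 'You are a concise assistant.'},
        {'role': 'user', 'content': 'Explain vector databases in one sentence.'},
    ]
    return get_completion_from_messages(client, messages, temperature=0)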
def genarating_outline(client, keywords, num_lessons,language):
system_message = 'You are a great AI teacher and linguist, skilled at creating course outlines based on summarized knowledge materials.'
user_message = f"""You are a great AI teacher and linguist,
skilled at generating a course outline based on the keywords of the course.
Based on the keywords provided, you should carefully design a course outline.
Requirements: Through learning this course, the learner should understand those key concepts.
Key concepts: {keywords}
You should output the course outline in a Python list format. Do not include anything else except that Python list in your output.
Example output format:
[[name_lesson1, abstract_lesson1],[name_lesson2, abstract_lesson2]]
In the example, you can see each element in this list consists of two parts: the "name_lesson" part is the name of the lesson, and the "abstract_lesson" part is a one-sentence description of the lesson that introduces the knowledge it contains.
For each lesson in this course, you should provide these two pieces of information and organize them as exemplified.
For this course, you should design {num_lessons} lessons in total.
The course outline should be written in {language}.
Start the work now.
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': user_message},
]
response = get_completion_from_messages(client, messages)
list_response = ['nothing in the answers..']
try:
list_response = eval(response)
except SyntaxError:
pass
return list_response
def courseOutlineGenerating(client, file_paths, num_lessons, language):
summarized_materials = get_keywords(file_paths)
course_outline = genarating_outline(client, summarized_materials, num_lessons, language)
return course_outline
def constructVDB(file_paths):
# split the knowledge materials (KM) into chunks
chunks = []
for filename in file_paths:
with open(filename, 'r') as f:
content = f.read()
for chunk in chunkstring(content, 730):
chunks.append(chunk)
chunk_df = pd.DataFrame(chunks, columns=['chunk'])
# from text chunks to embeddings
model = SentenceTransformer('paraphrase-mpnet-base-v2')
embeddings = model.encode(chunk_df['chunk'].tolist())
# convert embeddings to a dataframe
embedding_df = pd.DataFrame(embeddings.tolist())
# Concatenate the original dataframe with the embeddings
paraphrase_embeddings_df = pd.concat([chunk_df, embedding_df], axis=1)
# from embeddings to a vector database
# Load the embeddings
embeddings = paraphrase_embeddings_df.iloc[:, 1:].values # All columns except the first (chunk text)
# Ensure that the array is C-contiguous
embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)
# Preparation for Faiss
dimension = embeddings.shape[1] # the dimension of the vector space
index = faiss.IndexFlatL2(dimension)
# Normalize the vectors
faiss.normalize_L2(embeddings)
# Build the index
index.add(embeddings)
# write index to disk
return paraphrase_embeddings_df, index
def searchVDB(search_sentence, paraphrase_embeddings_df, index):
# retrieve the relevant passages from the vector database
try:
data = paraphrase_embeddings_df
embeddings = data.iloc[:, 1:].values # All columns except the first (chunk text)
embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)
model = SentenceTransformer('paraphrase-mpnet-base-v2')
sentence_embedding = model.encode([search_sentence])
# Ensuring the sentence embedding is in the correct format
sentence_embedding = np.ascontiguousarray(sentence_embedding, dtype=np.float32)
# Searching for the top 3 nearest neighbors in the FAISS index
D, I = index.search(sentence_embedding, k=3)
# Printing the top 3 most similar text chunks
retrieved_chunks_list = []
for idx in I[0]:
retrieved_chunks_list.append(data.iloc[idx].chunk)
except Exception:
retrieved_chunks_list = []
return retrieved_chunks_list
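# Illustrative sketch (hypothetical note files and query): build the FAISS index from local
# markdown notes, then retrieve the chunks most relevant to a question.
def _example_vdb_roundtrip():
    embeddings_df, faiss_index = constructVDB(["notes1.md", "notes2.md"])
    return searchVDB("What is gradient descent?", embeddings_df, faiss_index)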
def generateCourse(client, topic, materials, language, style_options):
system_message = 'You are a great AI teacher and linguist, skilled at writing an informative and easy-to-understand course script based on a given lesson topic and knowledge materials.'
user_message = f"""You are a great AI teacher and linguist,
skilled at writing an informative and easy-to-understand course script based on a given lesson topic and knowledge materials.\n
You should write a course for beginners; they need detailed and vivid explanations to understand the topic. \n
A high-quality course should meet the requirements below:\n
(1) Contains enough facts, data and figures to be convincing\n
(2) The internal narrative is layered and logical, not a simple pile of items\n
Make sure all these requirements are considered when writing the lesson script content.\n
Please follow this procedure step-by-step when designing the course:\n
Step 1. Write down the teaching purpose of the lesson initially in the script. \n
Step 2. Write down the outline of this lesson (the outline is aligned to the teaching purpose), then follow the outline to write the content. Make sure every concept in the outline is explained adequately in the course. \n
Your lesson topic and abstract are within the 「」 quotes, and the knowledge materials are within the 【】 brackets. \n
lesson topic and abstract: 「{topic}」, \n
knowledge materials related to this lesson:【{materials} 】 \n
The script should be written in {language}, and mathematical symbols should be written in markdown form. \n
{style_options} \n
Start writing the script of this lesson now.
"""
messages = [
{'role':'system',
'content': system_message},
{'role':'user',
'content': user_message},
]
response = get_completion_from_messages(client, messages)
return response
def decorate_user_question(user_question, retrieved_chunks_for_user):
decorated_prompt = f'''You're a brilliant teaching assistant, skilled at answering a student's question based on given materials.
student's question: 「{user_question}」
related materials:【{retrieved_chunks_for_user}】
If the given materials are irrelevant to the student's question, please use your own knowledge to answer the question.
You need to break down the student's question first, find out what they really want to ask, and then try your best to give a comprehensive answer.
The language you're answering in should be aligned with what the student is using.
Now you're talking to the student. Please answer.
'''
return decorated_prompt
def initialize_file(added_files):
temp_file_paths = []
with st.spinner('Processing file...'):
for added_file in added_files:
if added_file.name.endswith(".pdf"):
string = pdf_parser(added_file)
with tempfile.NamedTemporaryFile(suffix=".md", delete=False) as tmp:
tmp.write(string.encode("utf-8"))
tmp_path = tmp.name
else:
with tempfile.NamedTemporaryFile(delete=False, suffix=".md") as tmp:
tmp.write(added_file.getvalue())
tmp_path = tmp.name
temp_file_paths.append(tmp_path)
st.success('Processing file...Done')
return temp_file_paths
def initialize_vdb(temp_file_paths):
with st.spinner('Constructing vector database from provided materials...'):
embeddings_df, faiss_index = constructVDB(temp_file_paths)
st.success("Constructing vector database from provided materials...Done")
return embeddings_df, faiss_index
def initialize_outline(client, temp_file_paths, num_lessons, language):
with st.spinner('Generating Course Outline...'):
course_outline_list = courseOutlineGenerating(client, temp_file_paths, num_lessons, language)
st.success("Generating Course Outline...Done")
course_outline_string = ''
lessons_count = 0
for outline in course_outline_list:
lessons_count += 1
course_outline_string += f"{lessons_count}." + outline[0]
course_outline_string += '\n\n' + outline[1] + '\n\n'
with st.expander("Check the course outline", expanded=False):
st.write(course_outline_string)
return course_outline_list
def initialize_content(client, course_outline_list, embeddings_df, faiss_index, language, style_options):
count_generating_content = 0
course_content_list = []
for lesson in course_outline_list:
count_generating_content += 1
with st.spinner(f"Writing content for lesson {count_generating_content}..."):
retrievedChunksList = searchVDB(lesson, embeddings_df, faiss_index)
courseContent = generateCourse(client, lesson, retrievedChunksList, language, style_options)
course_content_list.append(courseContent)
st.success(f"Writing content for lesson {count_generating_content}...Done")
with st.expander(f"Learn the lesson {count_generating_content} ", expanded=False):
st.markdown(courseContent)
return course_content_list
def regenerate_outline(course_outline_list):
try:
course_outline_string = ''
lessons_count = 0
for outline in course_outline_list:
lessons_count += 1
course_outline_string += f"{lessons_count}." + outline[0]
course_outline_string += '\n\n' + outline[1] + '\n\n'
with st.expander("Check the course outline", expanded=False):
st.write(course_outline_string)
except Exception:
pass
def regenerate_content(course_content_list):
try:
count_generating_content = 0
for content in course_content_list:
count_generating_content += 1
with st.expander(f"Learn the lesson {count_generating_content} ", expanded=False):
st.markdown(content)
except Exception:
pass
def add_prompt_course_style(selected_style_list):
initiate_prompt = 'Please be significantly aware that this course is requested to: \n'
customize_prompt = ''
if len(selected_style_list) != 0:
customize_prompt += initiate_prompt
for style in selected_style_list:
if style == "More examples":
customize_prompt += '- **contain more examples**. You should use your own knowledge to vividly exemplify key concepts occurring in this course.\n'
elif style == "More exercises":
customize_prompt += '- **contain more exercises**. So the last part of this lesson should be exercises.\n'
elif style == "Easier to learn":
customize_prompt += '- **Be easier to learn**. So you should use plain language to write the lesson script, and apply some metaphors & analogies wherever appropriate.\n'
return customize_prompt
def app():
st.title("OmniTutor v0.1.0")
announce = st.caption('''
:blue[⚠️Key Announcement:] The free service has been disrupted due to high costs. In order to use OmniTutor you need to type your own OPENAI API key into the sidebar.
:blue[⚠️关键公告:] 免费服务现在已经因为过高的支出而中断。为了使用OmniTutor,你需要在边栏顶部输入自己的Openai API key。
''')
divider = st.divider()
st.markdown("""
<style>
.footer {
position: fixed;
bottom: 0;
right: 10px;
width: auto;
background-color: transparent;
text-align: right;
padding-right: 10px;
padding-bottom: 10px;
}
</style>
<div class="footer">Made with 🧡 by Siyuan</div>
""", unsafe_allow_html=True)
with st.sidebar:
api_key = st.text_input('Your OpenAI API key:', 'sk-...')
st.image("https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/20231021212525.png")
added_files = st.file_uploader('Upload .md or .pdf files; mixed uploads of both types are supported.', type=['.md','.pdf'], accept_multiple_files=True)
with st.expander('Customize my course'):
num_lessons = st.slider('How many lessons do you want this course to have?', min_value=2, max_value=15, value=5, step=1)
custom_options = st.multiselect(
'Preferred teaching style :grey[(New users are recommended to leave this unselected)]',
['More examples', 'More exercises', 'Easier to learn'],
max_selections = 2
)
style_options = add_prompt_course_style(custom_options)
language = 'English'
Chinese = st.checkbox('Output in Chinese')
if Chinese:
language = 'Chinese'
btn = st.button('Generate my course!')
if "description1" not in st.session_state:
st.session_state.description = ''
if "start_col1" not in st.session_state:
st.session_state.start_col1 = st.empty()
if "start_col2" not in st.session_state:
st.session_state.start_col2 = st.empty()
if "case_pay" not in st.session_state:
st.session_state.case_pay = st.empty()
if "embeddings_df" not in st.session_state:
st.session_state.embeddings_df = ''
if "faiss_index" not in st.session_state:
st.session_state.faiss_index = ''
if "course_outline_list" not in st.session_state:
st.session_state.course_outline_list = ''
if "course_content_list" not in st.session_state:
st.session_state.course_content_list = ''
if "OPENAI_API_KEY" not in st.session_state:
st.session_state["OPENAI_API_KEY"] = ''
#if "client" not in st.session_state:
# st.session_state["client"] = ''
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-4-1106-preview"
if "messages_ui" not in st.session_state:
st.session_state.messages_ui = []
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.start_col1, st.session_state.start_col2 = st.columns(2)
with st.session_state.start_col1:
st.session_state.description = st.markdown('''
<font color = 'grey'> An all-round teacher. A teaching assistant who really knows the subject. **Anything. Anywhere. All at once.** </font> :100:
Github Repo: https://github.com/Siyuan-Harry/OmniTutor
### ✨ Key features
- 🧑🏫 **Concise and clear course creation**: <font color = 'grey'>Generated from your learning notes (**.md**) or any learning materials (**.pdf**)!</font>
- 📚 **All disciplines**: <font color = 'grey'>Whether it's math, physics, literature, history or coding, OmniTutor covers it all.</font>
- ⚙️ **Customize your own course**: <font color = 'grey'>Choose your preferred teaching style, lesson count and language.</font>
- ⚡️ **Fast responses with trustworthy accuracy**: <font color = 'grey'>Problem-solving chat with an AI teaching assistant that really understands the materials.</font>
### 🏃♂️ Get started!
1. **Input Your OpenAI API Key**: <font color = 'grey'>Give OmniTutor your own OpenAI API key (at the top of the **sidebar**) to get started.</font>
2. **Upload learning materials**: <font color = 'grey'>The upload widget in the sidebar supports PDF and .md files simultaneously.</font>
3. **Customize your course**: <font color = 'grey'>With a few clicks and swipes, adjust the teaching style, lesson count and language of your course.</font>
4. **Start course generation**: <font color = 'grey'>Click the "Generate my course!" button in the sidebar, then watch how OmniTutor creates a personally customized course for you.</font>
5. **Interactive learning**: <font color = 'grey'>Learn the course, and ask OmniTutor any questions related to this course whenever you encounter them.</font>
🎉 Have fun playing with Omnitutor!
''', unsafe_allow_html=True
)
with st.session_state.start_col2:
st.session_state.case_pay = st.markdown('''
### 💡Application Cases 使用案例
> <font color = 'grey'> 用OmniTutor阅读专业的论文! </font>
<img src="https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/WX20231104-122256@2x%2016.03.49.png" height = "200" />
<img src="https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/WX20231104-113553@2x%202.png" height = "200" />
> <font color = 'grey'> 用OmniTutor学习Python! </font>
<img src="https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/[email protected]" height = "200" />
<img src="https://siyuan-harry.oss-cn-beijing.aliyuncs.com/oss://siyuan-harry/[email protected]" height = "200" />
---
### 🙌 Contact me 联系我
欢迎联系开发者 @何思远方Siyuan. Feel free to get in touch with me!
- 邮箱📮:[email protected]
- B站:何思远方Siyuan
''', unsafe_allow_html=True)
if btn:
if api_key != "sk-..." and api_key !="" and api_key.startswith("sk-"):
st.session_state.start_col1.empty()
st.session_state.start_col2.empty()
st.session_state.description.empty()
st.session_state.case_pay.empty()
announce.empty()
divider.empty()
#initialize app
temp_file_paths = initialize_file(added_files)
st.session_state["OPENAI_API_KEY"] = api_key
client = OpenAI(api_key = st.session_state["OPENAI_API_KEY"])
st.session_state.embeddings_df, st.session_state.faiss_index = initialize_vdb(temp_file_paths)
st.session_state.course_outline_list = initialize_outline(client, temp_file_paths, num_lessons, language)
st.session_state.course_content_list = initialize_content(client, st.session_state.course_outline_list, st.session_state.embeddings_df, st.session_state.faiss_index, language, style_options)
st.markdown('''
> 🤔 <font color = 'grey'> **Not satisfied with this course?** Simply click "Generate my course!" button to regenerate a new one! </font>
>
> 😁 <font color = 'grey'> If the course is good enough for you, learn and enter questions related in the input box below 👇... </font>
:blue[Wish you all the best in your learning journey :)]
''', unsafe_allow_html=True)
else:
st.session_state.start_col1.empty()
st.session_state.start_col2.empty()
st.session_state.description.empty()
st.session_state.case_pay.empty()
announce.empty()
divider.empty()
warning = st.write("请输入正确的OpenAI API Key令牌")
col1, col2 = st.columns([0.6,0.4])
user_question = st.chat_input("Enter your questions when learning...")
if user_question:
st.session_state.start_col1.empty()
st.session_state.start_col2.empty()
st.session_state.description.empty()
st.session_state.case_pay.empty()
announce.empty()
divider.empty()
with col1:
# print out the course outline
regenerate_outline(st.session_state.course_outline_list)
# print out the course content
regenerate_content(st.session_state.course_content_list)
with col2:
st.caption(''':blue[AI Assistant]: Ask this TA any questions related to this course and get direct answers. :sunglasses:''')
# Set a default model
with st.chat_message("assistant"):
st.write("Hello👋, how can I help you today? 😄")
# Display chat messages from history on app rerun
for message in st.session_state.messages_ui:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# update the chat history shown in the UI
st.session_state.messages_ui.append({"role": "user", "content": user_question})
# Display new user question.
with st.chat_message("user"):
st.markdown(user_question)
# session_state keeps the basic info and settings of this chat session
retrieved_chunks_for_user = searchVDB(user_question, st.session_state.embeddings_df, st.session_state.faiss_index)
prompt = decorate_user_question(user_question, retrieved_chunks_for_user)
st.session_state.messages.append({"role": "user", "content": prompt})
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
client = OpenAI(api_key = st.session_state["OPENAI_API_KEY"])
for response in client.chat.completions.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages # use the hidden message history kept for the chatbot
],
stream=True,
):
try:
full_response += response.choices[0].delta.content
except:
full_response += ""
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
st.session_state.messages_ui.append({"role": "assistant", "content": full_response})
if __name__ == "__main__":
app() | [
"- **contain more examples**. You should use your own knowledge to vividly exemplify key concepts occured in this course.\n",
"- **contain more excercises**. So last part of this lesson should be excercises.\n",
"You're a brilliant teaching assistant, skilled at answer stundent's question based on given materials.\n student's question: 「PLACEHOLDER」\n related materials:【PLACEHOLDER】\n if the given materials are irrelavant to student's question, please use your own knowledge to answer the question.\n You need to break down the student's question first, find out what he really wants to ask, and then try your best to give a comprehensive answer.\n The language you're answering in should aligned with what student is using.\n Now you're talking to the student. Please answer.\n ",
"Please be siginificantly aware that this course is requested to: \n",
"- **Be easier to learn**. So you should use plain language to write the lesson script, and apply some metaphors & analogys wherever appropriate.\n",
"content"
] |
2024-01-10 | therealcyberlord/Flashily.AI | backend~imgen.py | from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()
client = OpenAI()
response = client.images.generate(
model="dall-e-3",
prompt="a tree sloth in a blue hoodie, wearing black sony wireless headphones. He sips on a clear plastic cup of ice latte through a light blue straw. The tree sloth sits in high school styled chair with the table attached. There is a laptop on the high school styled chair, showing computer science lectures",
size="1024x1024",
quality="hd",
n=1,
)
image_url = response.data[0].url
print(image_url)
| [] |
2024-01-10 | emur-uy/chatbot | engine.py | import dotenv
import os
import psycopg2
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
# Load environment variables from .env file, including the OpenAI API key
dotenv.load_dotenv()
# Establish a connection to the database using the connection string stored in an environment variable
db_url = os.getenv('SQLALCHEMY_DATABASE_URL')
connection = psycopg2.connect(db_url)
cursor = connection.cursor()
# Execute a SQL query to fetch all data from the 'data' column of the 'information' table
cursor.execute("SELECT data FROM information")
rows = cursor.fetchall()
# Close the database connection when done
connection.close()
# Extract the 'data' field from each row and store them in a list
documents = [row[0] for row in rows]
# Instantiate a CharacterTextSplitter with a specific chunk size and overlap
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# Use the text splitter to convert each document into a list of chunks
# Each chunk will be a separate document
docs = text_splitter.create_documents(documents)
# Use the FAISS (Facebook AI Similarity Search) library to create an index from the documents
# This index will allow us to perform fast similarity searches
# Using the OpenAIEmbeddings to transform the text documents into numerical vectors
faissIndex = FAISS.from_documents(docs, OpenAIEmbeddings())
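# Illustrative sketch (hypothetical query text): the in-memory index can be queried directly,
# and after the save_local call below it can be reloaded elsewhere via
# FAISS.load_local("db_docs", OpenAIEmbeddings()).
def _example_similarity_query():
    return faissIndex.similarity_search("What information do we store about users?", k=3)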
# Save the index to a local file for future use
faissIndex.save_local("db_docs") | [] |
2024-01-10 | uqarni/tsl-demo | functions.py | import openai
import os
import re
import random
from datetime import datetime, timedelta
#generate openai response; returns messages with openai response
def ideator(messages):
key = os.environ.get("OPENAI_API_KEY")
openai.api_key = key
result = openai.ChatCompletion.create(
model="gpt-4",
messages= messages
)
response = result["choices"][0]["message"]["content"]
def split_sms(message):
# Use regular expressions to split the string at ., !, or ? followed by a space or newline
sentences = re.split('(?<=[.!?]) (?=\\S)|(?<=[.!?])\n', message.strip())
# Strip leading and trailing whitespace from each sentence
sentences = [sentence.strip() for sentence in sentences if sentence.strip()]
# Compute the cumulative length of all sentences
cum_length = [0]
for sentence in sentences:
cum_length.append(cum_length[-1] + len(sentence))
total_length = cum_length[-1]
# Find the splitting point
split_point = next(i for i, cum_len in enumerate(cum_length) if cum_len >= total_length / 2)
# Split the sentences into two parts at the splitting point
part1 = sentences[:split_point]
part2 = sentences[split_point:]
# Join the sentences in each part back into strings and exclude any part that is empty
strings = []
if part1:
strings.append(" ".join(part1))
if part2:
strings.append(" ".join(part2))
return strings
split_response = split_sms(response)
count = len(split_response)
for section in split_response:
section = {
"role": "assistant",
"content": section
}
messages.append(section)
return messages, count | [] |
2024-01-10 | Bradybry/Teams-Transcript-Summarizer | MSA.py | from flask import Flask, render_template, request, Response, make_response, session, redirect, url_for
import os
from pathlib import Path
from typing import List, Any
from expert import LanguageExpert
from langchain.text_splitter import TokenTextSplitter
import webvtt
global bullet_expert
bullet_expert = {'name': 'Meetings Minutes Writer',
'system_message': 'You are a helpful assistant that generates bulleted meeting summaries.',
'model_params': {'model_name': 'claude-2', 'temperature': 0.7, 'frequency_penalty': 1.0, 'presence_penalty': 0.5, 'n': 1, 'max_tokens': 2048}}
def chunk_text(text: str, chunk_size: int = 2048, chunk_overlap: int = 0) -> List[str]:
"""Split the text into chunks of a specified size."""
splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
return splitter.split_text(text)
def batch_list(items: List[Any], batch_size: int = 10) -> List[List[Any]]:
"""Split a list into smaller lists of a specified size."""
return [items[i:i + batch_size] for i in range(0, len(items), batch_size)]
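# Illustrative sketch (hypothetical transcript string): split a long transcript into token-sized
# chunks, then group the chunks into batches for bulk prompting.
def _example_chunk_and_batch(transcript: str):
    chunks = chunk_text(transcript, chunk_size=2048)
    return batch_list(chunks, batch_size=10)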
def convert_vtt_to_txt(infile, outfile):
"""Convert VTT subtitle file to a plain text file."""
vtt = webvtt.read(infile)
transcript = ""
lines = []
last_speaker = None
for line in vtt:
speaker = line.lines[0].split('>')[0].split('v ')[1]
if last_speaker != speaker:
lines.append('\n'+speaker + ': ')
lines.extend(line.text.strip().splitlines())
last_speaker = speaker
previous = None
for line in lines:
if line == previous:
continue
transcript += f" {line}"
previous = line
with open(outfile, 'w') as f:
f.write(transcript)
print(f'Length of original:\t{len(vtt.content)} characters\nLength of final:\t{len(transcript)} characters\nPercent Reduction:\t{100 - len(transcript)*100/len(vtt.content):.0f}%')
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
# Ensure that the Flask app has a secret key to use sessions
app.secret_key = os.urandom(24)
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method != 'POST':
return render_template('index.html')
# Check if file is uploaded
is_file_uploaded = 'file' in request.files and request.files['file'].filename != ''
# Check if text is entered
is_text_entered = 'text' in request.form and request.form['text'].strip() != ''
if not is_file_uploaded and not is_text_entered:
return 'No file or text provided', 400
if is_file_uploaded:
file = request.files['file']
input_file = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(input_file)
if input_file.endswith('.vtt'):
convert_vtt_to_txt(input_file, f'{input_file[:-4]}.txt')
input_file = f'{input_file[:-4]}.txt'
text_content = Path(input_file).read_text(encoding='utf-8')
else:
text_content = request.form['text']
# (Perform the summarization code, as before...)
selected_model = request.form['model_name']
summary = summarize(text_content, selected_model)
session['summary'] = summary
# Instead of creating a downloadable file, render the template with the summary
return redirect(url_for('display_summary'))
@app.route('/download-summary', methods=['GET', 'POST'])
def download_summary():
if request.method == 'POST':
# Extract the edited summary from the request if available
summary = request.form.get('edited-summary', '')
else:
# Get the summary from the user session for GET requests
summary = session.get('summary', '')
# Prepare the response as a downloadable text file
response = make_response(summary)
response.headers.set('Content-Type', 'text/plain')
response.headers.set('Content-Disposition', 'attachment', filename='summary.txt')
return response
@app.route('/summary', methods=['GET', 'POST'])
def display_summary():
# Get the summary from the user session
summary = session.get('summary', '')
if request.method == 'POST':
if edited_summary := request.form.get('edited-summary', ''):
summary = edited_summary
session['summary'] = summary
# Render the template with the summary
return render_template('index.html', summary=summary)
def summarize(text_content, selected_model):
max_tokens = 75000 if 'claude' in selected_model else 4096
chunks_of_text_content: List[str] = chunk_text(text_content, chunk_size=max_tokens)
chunks_of_text_content: List[str] = [f'Summarize the following meeting in a bulleted format. Include as many details as possible. The summary should be about 1000 words long.\n\n<transcript>{chunk}</transcript>' for chunk in chunks_of_text_content]
batched_chunks: List[List[str]] = batch_list(chunks_of_text_content)
bullet_generator: LanguageExpert = LanguageExpert(**bullet_expert)
bullet_generator.change_param("model_name", selected_model)
summarized_chunks: List[str] = []
for batch in batched_chunks:
summarized_batch: List[str] = bullet_generator.bulk_generate(batch)
summarized_chunks.extend(summarized_batch)
return ''.join(summarized_chunks)
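# Illustrative sketch (hypothetical file path): summarizing a transcript outside the Flask routes.
# Requires a valid Anthropic API key for the "claude-2" model used here.
def _example_offline_summary():
    text = Path("meeting.txt").read_text(encoding="utf-8")
    return summarize(text, "claude-2")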
if __name__ == '__main__':
app.run(debug=True) | [] |
2024-01-10 | Bradybry/Teams-Transcript-Summarizer | expert.py | import re
import json
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.schema import HumanMessage, SystemMessage
import datetime
from pathlib import Path
import os
openai_api_key = os.environ.get('OPENAI_API_KEY')
anthropic_api_key = os.environ.get('ANTHROPIC_API_KEY')
if openai_api_key is None:
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.environ.get('OPENAI_API_KEY')
if anthropic_api_key is None:
from dotenv import load_dotenv
load_dotenv()
anthropic_api_key = os.environ.get('ANTHROPIC_API_KEY')
# At the moment langchain API wrappers are needed due to the separation of chat models and language models. These wrappers allow us to use the same interface for both.
# Class to communicate with OpenAI for generating responses. Wrapped around the langchain wrappers
class OpenAIModel():
def __init__(self, openai_api_key=None, **model_params):
if openai_api_key is None:
raise ValueError('OpenAI API key must be provided')
self.chat = ChatOpenAI(openai_api_key=openai_api_key, **model_params)
def __call__(self, request_messages):
return self.chat(request_messages).content
def bulk_generate(self, message_list):
return self.chat.generate(message_list)
class AnthropicModel():
def __init__(self, anthropic_api_key=None, **model_params):
if anthropic_api_key is None:
raise ValueError('Anthropic API key must be provided')
self.chat = ChatAnthropic(model=model_params['model_name'], max_tokens_to_sample=model_params['max_tokens'], anthropic_api_key=anthropic_api_key)
def __call__(self, request_messages):
# Convert request_messages into a single string to be used as preamble
return self.chat(request_messages)
def bulk_generate(self, message_list):
return self.chat.generate(message_list)
class LanguageExpert:
"""Defines an AI assistant/expert for natural language generation.
Attributes:
name (str): Name of the expert
system_message (str): Expert's initial greeting message
description (str): Description of the expert's abilities
example_input (str): Sample user input the expert can handle
example_output (str): Expert's response to the sample input
model_params (dict): Parameters to configure the language model
"""
def __init__(self, name: str, system_message=None, description=None,
example_input=None, example_output=None, model_params=None):
## Initialize expert attributes##
self.name = name
self.system_message = system_message
self.description = description
self.example_input = example_input
self.example_output = example_output
##Set default model parameters if none provided##
if model_params is None:
model_params = {"model_name": "claude-v1.3", "temperature": 0.00,
"frequency_penalty": 1.0, "presence_penalty": 0.5,
"n": 1, "max_tokens": 512}
self.model_params = model_params
self.gen_chat() #Generate the chat object to get model-specific responses
def serialize(self):
"""Returns a JSON-serializable representation of the expert.
Returns:
dict: Contains all expert attributes.
"""
return {
"name": self.name,
"system_message": self.system_message,
"description": self.description,
"example_input": self.example_input,
"example_output": self.example_output,
"model_params": self.model_params
}
def get_content(self):
"""Returns the expert definition in an fake XML format.
Returns:
SystemMessage: Expert definition wrapped in XML tags.
"""
content = '<assistant_definition>\n'
if self.name:
content += f'<name>{self.name}</name>\n'
if self.description:
content += f'<role>{self.description}</role>\n'
if self.system_message:
content += f'<system_message>{self.system_message}</system_message>\n'
if example_input := self.example_input:
content += f'<example_input>{example_input}</example_input>\n'
if example_output := self.example_output:
content += f'<example_output>{example_output}</example_output>\n'
content += '</assistant_definition>'
return SystemMessage(content=content)
def generate(self, message):
"""Generates a response to the input message.
Passes the input through the chat model and returns its response.
Parameters:
message (str): User's input message
Returns:
response (str): expert's response to the message
"""
human_message = HumanMessage(content=message)
request_message = [self.get_content(), human_message]
response = self.chat(request_message)
self.log([message], [response])
return response
def log(self, requests, responses):
"""Logs a conversation between the user and the expert.
Parameters:
requests (list): List of user requests/messages
responses (list): List of expert responses
"""
now = datetime.datetime.now()
filename = Path(f'./logs/{now.strftime("%Y-%m-%d_%H-%M-%S")}_{self.name}.txt')
filename.parent.mkdir(parents=True, exist_ok=True)
log = f'Expert Name: {self.name}\n\nRequests:\n'
for request in requests:
log += f'{request}\n\n'
log += 'Responses:\n'
for response in responses:
log += f'{response}\n\n'
with open(filename, 'w', encoding='utf-8') as f:
f.write(log)
def extract_texts_from_generations(self, generations):
"""Extracts plain text responses from a list of generated responses.
Parameters:
generations (list): List of generated responses from the model
Returns:
list: List of plain text responses
"""
return [generation[0].text for generation in generations]
def bulk_generate(self, messages:list):
"""Generates responses for multiple input messages.
Parameters:
messages (list): List of user input messages
Returns:
responses (list): List of corresponding expert responses
"""
human_messages = [HumanMessage(content=message) for message in messages]
request_messages = [[self.get_content(), human_message] for human_message in human_messages]
responses = self.chat.bulk_generate(request_messages)
responses = self.extract_texts_from_generations(responses.generations)
self.log(messages, responses)
return responses
def __call__(self, message:str):
"""Allows the expert to be called like a function.
Invokes the generate() method.
"""
return self.generate(message)
def change_param(self, parameter_name, new_value):
"""Changes a expert definition parameter to a new value.
Updates the internal model_params dictionary and regenerates
the chat object.
Parameters:
parameter_name (str): Name of the parameter to change
new_value: New value for the parameter
"""
if parameter_name in ["model_name", "temperature", "frequency_penalty", "presence_penalty", "n", "max_tokens"]:
self.__dict__["model_params"][parameter_name] = new_value
else:
self.__dict__[parameter_name] = new_value
self.gen_chat()
def gen_chat(self):
"""Instantiates the chat object used to generate responses.
The chat object is either an AnthropicModel or OpenAIModel, depending
on the model_name parameter.
"""
if 'gpt' in self.model_params["model_name"]:
self.chat = OpenAIModel(openai_api_key=openai_api_key, **self.model_params)
elif 'claude' in self.model_params["model_name"]:
self.chat = AnthropicModel(anthropic_api_key=anthropic_api_key, **self.model_params)
else:
raise ValueError('Model not supported')
class Manager(object):
"""A class to manage and manipulate a collection of language experts.
Attributes:
fname (str): Filename from which to load/save language expert data.
experts (dict): A dictionary containing name and serialized data
of added language experts.
"""
def __init__(self, infile=None):
"""
Initializes a Manager object with the file name for storing/retrieving data.
Args:
infile (str, optional): Filename containing existing language expert data. Defaults to None.
"""
self.fname = infile
if infile is None:
self.experts = {}
else:
self.load(infile)
def add_expert(self, expert: LanguageExpert):
"""Add an expert to the dictionary of experts.
Parameters:
expert (LanguageExpert): expert to add
Saves to file (if fname defined)
"""
self.experts[expert.name] = expert.serialize()
if self.fname != None:
self.save(self.fname)
def delete_expert(self, expert_name):
"""Delete an expert from the dictionary of experts.
Parameters:
expert_name (str): name of expert to delete
"""
del self.experts[expert_name]
def __getitem__(self, key):
"""Create an expert object from the serialized expert dict.
Parameters:
key (str): name of expert to retrieve
Returns:
dict: corresponding expert object
"""
return self.get_expert(key)
def get_expert(self, expert_name):
"""Retrieve expert object from dictionary.
Parameters:
expert_name (str): name of expert to retrieve
Returns:
LanguageExpert: corresponding expert object
"""
return LanguageExpert(**self.experts[expert_name])
def save(self, outfile):
"""Save all experts to file. This will overwrite any existing file and only store the experts in the current manager.
Parameters:
outfile (str): file name to save experts to
"""
with open(outfile, 'w') as f:
json.dump(self.experts, f)
def load(self, infile):
"""Load experts from file, overwriting any currently stored in the manager.
Parameters:
infile (str): file name to load experts from
"""
with open(infile, 'r') as f:
self.experts = json.load(f)
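# Illustrative sketch (hypothetical expert, prompt and output path): define an expert, register
# it with a Manager, persist the registry, and generate a response. A valid OPENAI_API_KEY is
# assumed for the "gpt-4" model used here.
def _example_manager_usage():
    summarizer = LanguageExpert(
        name="Summarizer",
        description="Condenses text into a short summary.",
        system_message="Summarize the user's text in two sentences.",
        model_params={"model_name": "gpt-4", "temperature": 0.0, "frequency_penalty": 1.0,
                      "presence_penalty": 0.5, "n": 1, "max_tokens": 256},
    )
    manager = Manager()              # start with an empty, file-less registry
    manager.add_expert(summarizer)
    manager.save("experts.json")     # hypothetical output path
    return manager["Summarizer"]("Long text to summarize goes here.")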
def parse_assistant_definition(markdown_text):
# Define patterns for extracting different parts of the assistant definition
name_pattern = re.compile(r'<name>(.*?)<\/name>', re.DOTALL)
role_pattern = re.compile(r'<role>(.*?)<\/role>', re.DOTALL)
system_message_pattern = re.compile(r'<system_message>(.*?)<\/system_message>', re.DOTALL)
example_input_pattern = re.compile(r'<example_input>(.*?)<\/example_input>', re.DOTALL)
example_output_pattern = re.compile(r'<example_output>(.*?)<\/example_output>', re.DOTALL)
# Extract the role (as name), system_message, example_input, and example_output from the markdown text
name = name_pattern.search(markdown_text)[1].strip()
role = role_pattern.search(markdown_text)[1].strip()
system_message = system_message_pattern.search(markdown_text)[1].strip()
example_input = example_input_pattern.search(markdown_text)[1].strip()
example_output = example_output_pattern.search(markdown_text)[1].strip()
return {
'name': name,
'description': role,
'system_message': system_message,
'example_input': example_input,
'example_output': example_output,
} | [] |
2024-01-10 | AhmadCodes/twitterpersona | twitter_persona~twitter_persona.py | """Main module."""
# %%
import os
try:
from LLM.request_llm import get_response
from base_prompt import base_prompt as BASEPROMPT
from tweets_topic.get_topic import topic_extraction
from tweets_similarity_search.search_tweets import TweetsSimilarity
from tweets_irony.get_irony import irony_extraction
from tweets_sentiment.get_sentiment import sentiment_extraction
from tweets_emotion.get_emotions import emotion_extraction
from tweets_loader.csv_tweets_loader import loadCSVTweets
except ImportError:
from .base_prompt import base_prompt as BASEPROMPT
from .LLM.request_llm import get_response
from .tweets_topic.get_topic import topic_extraction
from .tweets_similarity_search.search_tweets import TweetsSimilarity
from .tweets_irony.get_irony import irony_extraction
from .tweets_sentiment.get_sentiment import sentiment_extraction
from .tweets_emotion.get_emotions import emotion_extraction
from .tweets_loader.csv_tweets_loader import loadCSVTweets
# %%
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI, PromptTemplate
from langchain.chains.question_answering import load_qa_chain
# from langchain.embeddings import ModelScopeEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from chromadb.config import Settings
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
import json
import requests
import json
import time
# %%
from langchain.embeddings import HuggingFaceEmbeddings
# %%
class TwitterPersona:
def __init__(self,
personality_username: str,
persona_name="Elon Musk",
debug=False) -> None:
self.NAME = persona_name
self.debug = debug
self.personality_username = personality_username
self.persona_tweets = self.get_tweets(self.personality_username)[:50]
self.tweets_similarity = TweetsSimilarity(self.persona_tweets,
debug=self.debug)
self.get_all_snippets()
def get_tweets(self, personality_username):
return loadCSVTweets(cleanTweets=True)
def get_emotions_prompt_snippet(self):
_, emotions_percentage_dict = emotion_extraction(self.persona_tweets,
thresh=0.7,
top_k=3,
debug=self.debug)
emotions_percentages = [[k, v*100]
for k, v in emotions_percentage_dict.items()]
line_seperated_emotions_percentages = "\n".join(
[f"- {k.replace('_', ' ')}: {(v):.2f}%" for k, v in emotions_percentages])
emotions_prompt_snippet = """\nEmotions:\n{}\n""".format(line_seperated_emotions_percentages)
return emotions_prompt_snippet
def get_topic_prompt_snippet(self):
_, topic_percentages_dict = topic_extraction(self.persona_tweets,
thresh=0.7,
debug=self.debug)
line_seperated_topic_percentages = "\n".join(
[f"- {k.replace('_', ' ')}: {(v*100):.2f}%" for k, v in topic_percentages_dict.items()])
topic_prompt_snippet = """Topic:\n{}\n""".format(line_seperated_topic_percentages)
return topic_prompt_snippet
def get_sentiment_prompt_snippet(self):
_, sentiment_percentages_dict = sentiment_extraction(self.persona_tweets,
debug=self.debug)
line_seperated_sentiment_percentages = "\n".join(
[f"- {k.replace('_', ' ')}: {(v*100):.2f}%" for k, v in sentiment_percentages_dict.items()])
sentiment_prompt_snippet = """\nSentiments:\n{}\n""".format(line_seperated_sentiment_percentages)
return sentiment_prompt_snippet
def get_irony_prompt_snippet(self):
_, irony_percentages_dict = irony_extraction(self.persona_tweets,
thresh=0.7,
debug=self.debug)
line_seperated_irony_percentages = "\n".join(
[f"- {k.replace('_', ' ')}: {(v*100):.2f}%" for k, v in irony_percentages_dict.items()])
irony_prompt_snippet = """\nIrony:\n{}\n""".format(line_seperated_irony_percentages)
return irony_prompt_snippet
def get_all_snippets(self):
print(f"Analysing {self.NAME}'s {len(self.persona_tweets)} Tweets for Personality Analysis...")
print("Please Wait. This may take a while...\n")
self.emotions_prompt_snippet = self.get_emotions_prompt_snippet()
self.topic_prompt_snippet = self.get_topic_prompt_snippet()
self.sentiment_prompt_snippet = self.get_sentiment_prompt_snippet()
self.irony_prompt_snippet = self.get_irony_prompt_snippet()
self.all_snippets = "\n".join([self.emotions_prompt_snippet,
self.topic_prompt_snippet,
self.sentiment_prompt_snippet,
self.irony_prompt_snippet])
self.all_snippets = f"{self.NAME}'s Tweet Analysis (Percentages):" + self.all_snippets
def get_similar_tweets(self, query_tweet):
matching_tweets = self.tweets_similarity.get_similar_tweets(query_tweet,
thresh=0.7,
top_k=5)
line_seperated_tweets = "\n".join(matching_tweets)
similar_tweets_prompt_snippet = """Their Similar Tweets to the user query:\n{}\n""".format(line_seperated_tweets)
if line_seperated_tweets == "":
similar_tweets_prompt_snippet = ""
return similar_tweets_prompt_snippet
# %%
class ConversationBot:
def __init__(self,
persona_object,
user_name="Max",
debug=False) -> None:
self.user_name = user_name
self.debug = debug
self.persona = persona_object
self.NAME = self.persona.NAME
self.all_snippets = self.persona.all_snippets
self.personality_username = self.persona.personality_username
# self.embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-xl", model_kwargs=model_kwargs,)
self.embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': 'cuda:0'})
HOME = os.path.expanduser('~')
twitterpersona_path = os.path.join(HOME,
".twitterpersona")
# '/.db_instructor_xl'
self.persist_directory = os.path.join(
twitterpersona_path,
f'.{self.user_name}_{self.personality_username}_db')
if not os.path.exists(self.persist_directory):
os.makedirs(self.persist_directory, exist_ok=True)
CHROMA_SETTINGS = Settings(
chroma_db_impl='duckdb+parquet',
persist_directory=self.persist_directory,
anonymized_telemetry=False
)
self.text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=200)
curr_file_path = os.path.dirname(os.path.realpath(__file__))
prev_converstion_path = os.path.join(
twitterpersona_path, f"{self.user_name}_{self.personality_username}_prev_conversation.txt")
if not os.path.exists(twitterpersona_path):
os.makedirs(twitterpersona_path, exist_ok=True)
if not os.path.exists(prev_converstion_path):
with open(prev_converstion_path, "w") as f:
f.write(f"{self.user_name}: . . .")
loader = TextLoader(prev_converstion_path)
texts_ = loader.load()
texts = self.text_splitter.create_documents([texts_[0].page_content])
db = Chroma.from_documents(
texts, self.embeddings, persist_directory=self.persist_directory)
db.persist()
self.answers = []
self.conv_buffer = []
self.avg_retrival_time = 0
self.avg_generation_time = 0
self.avg_store_time = 0
self.avg_total_time = 0
self.texts = []
self.count = 0
def reply_user(self, query):
self.count += 1
starting_time = time.time()
query = str(query)
# print(f"{self.user_name}: {query}\n")
self.conv_buffer.append(f"{self.user_name}: {query}")
vectordb = Chroma(persist_directory=self.persist_directory,
embedding_function=self.embeddings)
# without similarity score
context = vectordb.similarity_search_with_score(query)
retriever = vectordb.as_retriever(search_type="mmr")
context = retriever.get_relevant_documents(query)
context = context[0].page_content
# print(f"""Context Fetched for query: "{query}" :\n""",context)
retrival_time = time.time() - starting_time
# print(f"Retrieval Time: {retrival_time}")
time_after_retrieval = time.time()
last_convo = self.conv_buffer[-20:]
last_convo_str = "\n".join(last_convo)
matching_tweets = self.persona.get_similar_tweets(query)
base_prompt = BASEPROMPT.format(self.NAME,
self.personality_username,
self.user_name)
system_prompt = f"""
{base_prompt}
{self.all_snippets}
{matching_tweets}
The memory of the previous conversation between {self.NAME} and {self.user_name}:
{context}
Continuing from the last conversation where {self.NAME} greets {self.user_name} with reference to the last conversation:
{last_convo_str}\n"""
message = query
prompt = f"{system_prompt}{self.NAME}:"
while "\n\n" in prompt:
prompt = prompt.replace("\n\n", "\n")
while " " in prompt:
prompt = prompt.replace(" ", "")
self.prompt = prompt
# PROMTING THE LLM HERE
response = get_response(prompt)
generation_time = time.time() - time_after_retrieval
person = response.replace(prompt, "").split("/n")[0]
if person.rfind(":") > 0:
person = person[:person.rfind(":")]
if person[-1] not in ['.', '?', '!']:
if person.rfind(".") > person.rfind("?") and person.rfind(".") > person.rfind("!"):
# print(f"Truncated from response: {person[person.rfind('.')+1:]}")
person = person[:person.rfind(".")] + person[person.rfind(".")]
elif person.rfind("?") > person.rfind(".") and person.rfind("?") > person.rfind("!"):
# print(f"Truncated from response: {person[person.rfind('.')+1:]}")
person = person[:person.rfind("?")] + person[person.rfind("?")]
elif person.rfind("!") > person.rfind(".") and person.rfind("!") > person.rfind("?"):
# print(f"Truncated from response: {person[person.rfind('.')+1:]}")
person = person[:person.rfind("!")] + person[person.rfind("!")]
len_response = len(person.split())
time_after_generation = time.time()
# print(f"""{self.NAME}: {person} \n\n""")
self.conv_buffer.append(f"{self.NAME}: {person}")
self.answers.append(person)
dialog = f"{self.user_name}: {query}\n{self.NAME}: {person}\n"
self.texts.append(dialog)
if self.count % 3 == 0:
texts_str = "\n".join(self.texts)
textss = self.text_splitter.create_documents([texts_str])
db = Chroma.from_documents(
textss, self.embeddings, persist_directory=self.persist_directory)
db.persist()
self.texts = []
store_time = time.time() - time_after_generation
return person
# %%
if __name__ == "__main__":
elonpersona = TwitterPersona("elonmusk")
bot = ConversationBot(elonpersona)
with open("../sample_data/sampleQuestions.txt") as f:
questions = f.readlines()
for question in questions:
A = bot.reply_user(question)
print(f"{bot.user_name}: {question}Elon Musk:{A} \n\n")
# %%
| [
"\n\n",
"\n",
"Topic:\nPLACEHOLDER\n",
" ",
"\nIrony:\nPLACEHOLDER\n",
"\nSentiments:\nPLACEHOLDER\n",
"Their Similar Tweets to the user query:\nPLACEHOLDER\n",
"\nEmotions:\nPLACEHOLDER\n"
] |
2024-01-10 | manyuzhang1996/FAANG-Stock-Price-Prediction-under-Layoffs-and-AI-Boost | Text%20Mining%20and%20LDA%20Modeling~lda_modeling.py | import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
from gensim import corpora, models
from gensim.models import CoherenceModel
from wordcloud import WordCloud
def calculate_coherence(corpus, dictionary, texts, num_topics):
lda_model = models.LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=15)
coherence_model = CoherenceModel(model=lda_model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_score = coherence_model.get_coherence()
return coherence_score
def lda_modeling_experiment(df):
texts = df['title'].tolist()
texts = [word_tokenize(text) for text in texts]
texts = [[word for word in text if word.isalpha()] for text in texts]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
seed = 42
num_topics_list = [2, 3, 4, 5, 6, 7, 8, 9, 10]
coherence_scores = [calculate_coherence(corpus, dictionary, texts, num_topics) for num_topics in num_topics_list]
optimal_topics = num_topics_list[coherence_scores.index(max(coherence_scores))]
# Visualize the results
plt.plot(num_topics_list, coherence_scores, marker='o')
plt.xlabel('Number of Topics')
plt.ylabel('Coherence Score')
plt.title('Optimal Number of Topics Selection')
plt.show()
print(f"The optimal number of topics is {optimal_topics}")
return corpus, optimal_topics, dictionary
def optimal_lda_modeling(corpus, optimal_topics, dictionary):
lda_model = models.LdaModel(corpus, num_topics=optimal_topics, id2word=dictionary, passes=15)
for topic_id, topic_words in lda_model.print_topics():
print(f'\nTopic {topic_id + 1}: {topic_words}')
return lda_model
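# Illustrative sketch (assumes a DataFrame with a 'title' column of headlines): run the coherence
# sweep, fit the best model, then plot per-topic word clouds with the helper defined below.
def _example_lda_pipeline(df):
    corpus, optimal_topics, dictionary = lda_modeling_experiment(df)
    lda_model = optimal_lda_modeling(corpus, optimal_topics, dictionary)
    visualize_lda_topics(lda_model, optimal_topics)
    return lda_model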
def visualize_lda_topics(lda_model, num_topics):
topics = [lda_model.show_topic(topic_id) for topic_id in range(num_topics)]
for i, topic_words in enumerate(topics):
wordcloud = WordCloud(width=800, height=400, background_color='white').generate_from_frequencies(dict(topic_words))
plt.figure(figsize=(6, 4))
plt.imshow(wordcloud, interpolation='bilinear')
plt.title(f'Topic {i + 1} Word Cloud')
plt.axis('off')
plt.show() | [] |
2024-01-10 | krylm/whisper-event-tuning | run_speech_recognition_seq2seq_streaming_mikr.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence speech recognition
with 🤗 Datasets' streaming mode.
"""
# You can also adapt this script for your own sequence to sequence speech
# recognition task. Pointers for this are left as comments.
import logging
import os
import sys
import numpy as np
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import torch
from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
from torch.utils.data import IterableDataset
import evaluate
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForSpeechSeq2Seq,
AutoProcessor,
AutoTokenizer,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
TrainerCallback,
set_seed,
)
from transformers.models.whisper.english_normalizer import BasicTextNormalizer
from transformers.trainer_pt_utils import IterableDatasetShard
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.25.0.dev0")
require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
feature_extractor_name: Optional[str] = field(
default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
freeze_feature_encoder: bool = field(
default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
)
freeze_encoder: bool = field(
default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
)
forced_decoder_ids: List[List[int]] = field(
default=None,
metadata={
"help": (
"A list of pairs of integers which indicates a mapping from generation indices to token indices "
"that will be forced before sampling. For example, [[0, 123]] means the first generated token "
"will always be a token of index 123."
)
},
)
suppress_tokens: List[int] = field(
default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
)
model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": (
"Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
" 'max_duration_in_seconds`"
)
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
train_split_name: str = field(
default="train",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
do_lower_case: bool = field(
default=False,
metadata={"help": "Whether the target text should be lower cased."},
)
do_remove_punctuation: bool = field(
default=False,
metadata={"help": "Whether the target text should be striped of punctuation."},
)
do_normalize_eval: bool = field(
default=True,
metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
)
language: str = field(
default=None,
metadata={
"help": (
"Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
"only. For English speech recognition, it should be set to `None`."
)
},
)
task: str = field(
default="transcribe",
metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
)
shuffle_buffer_size: Optional[int] = field(
default=500,
metadata={
"help": (
"The number of streamed examples to download before shuffling them. The large the buffer, "
"the closer it is to real offline shuffling."
)
},
)
streaming: bool = field(
default=True,
metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
)
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor ([`WhisperProcessor`])
The processor used for processing the data.
decoder_start_token_id (`int`)
The begin-of-sentence of the decoder.
"""
processor: Any
decoder_start_token_id: int
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
model_input_name = self.processor.model_input_names[0]
input_features = [{model_input_name: feature[model_input_name]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
# if bos token is appended in previous tokenization step,
        # cut bos token here as it's appended later anyways
if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
labels = labels[:, 1:]
batch["labels"] = labels
return batch
def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs):
"""
Utility function to load a dataset in streaming mode. For datasets with multiple splits,
each split is loaded individually and then splits combined by taking alternating examples from
each (interleaving).
"""
if "+" in split:
# load multiple splits separated by the `+` symbol with streaming mode
dataset_splits = [
load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
for split_name in split.split("+")
]
# interleave multiple splits to form one dataset
interleaved_dataset = interleave_datasets(dataset_splits)
return interleaved_dataset
else:
# load a single split *with* streaming mode
dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
return dataset
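# Illustrative use of the "+" split handling above (a sketch only, not executed by
# this script; the dataset name and config are assumptions for the example):
#
#   train_ds = load_maybe_streaming_dataset(
#       "mozilla-foundation/common_voice_11_0",
#       "cs",
#       split="train+validation",
#       streaming=True,
#   )
#
# This returns a single streaming dataset that interleaves examples from the two splits.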
def main():
# 1. Parse input arguments
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
# 2. Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# 3. Detecting last checkpoint and eventually continue from last checkpoint
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# 4. Load dataset
raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_maybe_streaming_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
use_auth_token=True if model_args.use_auth_token else None,
streaming=data_args.streaming,
)
if training_args.do_eval:
raw_datasets["eval"] = load_maybe_streaming_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
use_auth_token=True if model_args.use_auth_token else None,
streaming=data_args.streaming,
)
raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
if data_args.audio_column_name not in raw_datasets_features:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets_features)}."
)
if data_args.text_column_name not in raw_datasets_features:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets_features)}."
)
# 5. Load pretrained model, tokenizer, and feature extractor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
if training_args.gradient_checkpointing:
config.update({"use_cache": False})
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if model_args.freeze_encoder:
model.freeze_encoder()
if data_args.language is not None:
# We only need to set the task id when the language is specified (i.e. in a multilingual setting)
tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
# 6. Resample speech dataset if necessary
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
text_column_name = data_args.text_column_name
model_input_name = feature_extractor.model_input_names[0]
do_lower_case = data_args.do_lower_case
do_remove_punctuation = data_args.do_remove_punctuation
normalizer = BasicTextNormalizer() # 'official' text normalizer from OpenAI
if data_args.max_train_samples is not None:
raw_datasets["train"] = (
raw_datasets["train"].take(data_args.max_train_samples)
if data_args.streaming
else raw_datasets["train"].select(range(data_args.max_train_samples))
)
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = (
raw_datasets["eval"].take(data_args.max_eval_samples)
if data_args.streaming
else raw_datasets["eval"].select(range(data_args.max_eval_samples))
)
def prepare_dataset(batch):
# process audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
# process audio length
batch[model_input_name] = inputs.get(model_input_name)[0]
batch["input_length"] = len(sample["array"])
# process targets
input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
if do_remove_punctuation:
input_str = normalizer(input_str).strip()
batch["labels"] = tokenizer(input_str).input_ids
return batch
def prepare_dataset_concatenated(batch):
""" Milan Kryl @mikr - combine multiple items into one up to a maximum duration"""
MAX_AUDIO_DURATION = 30
        result = {
            # model_input_name resolves to "input_features" for Whisper, so the
            # literal key below is the one used by the appends further down
            'input_features': [],
            'labels': []
        }
buffer = {
'duration': 0,
'array': [],
'text': []
}
sample = batch[audio_column_name][0]
batch_size = len(batch[text_column_name])
for i in range(batch_size):
audio_array = batch[audio_column_name][i]['array']
text = batch[text_column_name][i].lower() if do_lower_case else batch[text_column_name][i]
if do_remove_punctuation:
text = normalizer(text).strip()
audio_duration = audio_array.shape[0] / sample['sampling_rate']
if buffer['duration'] + audio_duration < MAX_AUDIO_DURATION:
buffer['array'].append(audio_array)
buffer['text'].append(text)
buffer['duration'] += audio_duration
else:
tokens = tokenizer(' '.join(buffer['text'])).input_ids
result['labels'].append(tokens)
feature = feature_extractor(np.concatenate(buffer['array']), sampling_rate=sample['sampling_rate']).input_features[0]
result['input_features'].append(feature)
buffer = {
'duration': audio_duration,
'array': [audio_array],
'text': [text]
}
# flush buffer
if len(buffer['array']):
tokens = tokenizer(' '.join(buffer['text'])).input_ids
result['labels'].append(tokens)
feature = feature_extractor(np.concatenate(buffer['array']), sampling_rate=sample['sampling_rate']).input_features[0]
result['input_features'].append(feature)
return result
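    # Worked example of the packing above (assuming the 30 s limit): clips of
    # 12 s, 10 s, 9 s and 20 s are grouped as [12, 10] (22 s, since adding the 9 s
    # clip would reach 31 s) and then [9, 20] (29 s); each group is concatenated
    # into a single training example whose label is the space-joined transcript.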
with training_args.main_process_first(desc="dataset map pre-processing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset_concatenated,
batched=True,
batch_size=64,
remove_columns=raw_datasets_features,
).with_format("torch")
if training_args.do_train and data_args.streaming:
# manually shuffle if streaming (done by the trainer for non-streaming)
vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
buffer_size=data_args.shuffle_buffer_size,
seed=training_args.seed,
)
    # filter training data that is shorter than min_input_length or longer than
    # max_input_length (note: the concatenation-based preprocessing above does not
    # emit an "input_length" column, so this helper is currently left unused)
def is_audio_in_length_range(length):
return min_input_length < length < max_input_length
# 8. Load Metric
metric = evaluate.load("wer")
do_normalize_eval = data_args.do_normalize_eval
def compute_metrics(pred):
pred_ids = pred.predictions
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
if do_normalize_eval:
pred_str = [normalizer(pred) for pred in pred_str]
label_str = [normalizer(label) for label in label_str]
# filtering step to only evaluate the samples that correspond to non-zero references:
pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
wer = 100 * metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
# 9. Create a single speech processor
if is_main_process(training_args.local_rank):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
processor = AutoProcessor.from_pretrained(training_args.output_dir)
# 10. Define data collator
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
processor=processor,
decoder_start_token_id=model.config.decoder_start_token_id,
)
# 11. Configure Trainer
# Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
# Only required for streaming: Trainer automatically shuffles non-streaming datasets
class ShuffleCallback(TrainerCallback):
def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
if isinstance(train_dataloader.dataset, IterableDatasetShard):
pass # set_epoch() is handled by the Trainer
elif isinstance(train_dataloader.dataset, IterableDataset):
train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
# Initialize Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
tokenizer=feature_extractor,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
callbacks=[ShuffleCallback()] if data_args.streaming else None,
)
# 12. Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the feature extractor too for easy upload
metrics = train_result.metrics
if data_args.max_train_samples:
metrics["train_samples"] = data_args.max_train_samples
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# 13. Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
metric_key_prefix="eval",
max_length=training_args.generation_max_length,
num_beams=training_args.generation_num_beams,
)
if data_args.max_eval_samples:
metrics["eval_samples"] = data_args.max_eval_samples
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# 14. Write Training Stats
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "automatic-speech-recognition",
"tags": "whisper-event",
}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if "common_voice" in data_args.dataset_name:
kwargs["language"] = data_args.dataset_config_name.split('-')[0]
if model_args.model_index_name is not None:
kwargs["model_name"] = model_args.model_index_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| [] |
2024-01-10 | bohuizhang/ontogpt | src~ontogpt~engines~spires_engine.py | """
Main Knowledge Extractor class.
This works by recursively constructing structured prompt-completions where
a pseudo-YAML structure is requested, where the YAML
structure corresponds to a template class.
Describe in the SPIRES manuscript
TODO: add link
"""
import logging
import re
import uuid
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import pydantic
import yaml
from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition
from oaklib import BasicOntologyInterface
from ontogpt.engines.knowledge_engine import (
ANNOTATION_KEY_PROMPT,
ANNOTATION_KEY_PROMPT_SKIP,
EXAMPLE,
FIELD,
OBJECT,
KnowledgeEngine,
chunk_text,
)
from ontogpt.io.yaml_wrapper import dump_minimal_yaml
from ontogpt.templates.core import ExtractionResult
this_path = Path(__file__).parent
RESPONSE_ATOM = Union[str, "ResponseAtom"] # type: ignore
RESPONSE_DICT = Dict[FIELD, Union[RESPONSE_ATOM, List[RESPONSE_ATOM]]]
@dataclass
class SPIRESEngine(KnowledgeEngine):
"""Knowledge extractor."""
engine: str = "gpt-3.5-turbo-instruct"
recurse: bool = True
"""If true, then complex non-named entity objects are always recursively parsed.
If this is false AND the complex object is a pair, then token-based splitting is
instead used.
TODO: deprecate this, it's not clear that token-based splitting is better, due to
the inability to control which tokens GPT will use"""
sentences_per_window: Optional[int] = None
"""If set, this will split the text into chains of sentences,
where this determines the maximum number of sentences per chain.
The results are then merged together."""
def extract_from_text(
self,
text: str,
cls: ClassDefinition = None,
object: OBJECT = None,
show_prompt: bool = False,
) -> ExtractionResult:
"""
Extract annotations from the given text.
:param text:
:param cls:
:param object: optional stub object
:return:
"""
if self.sentences_per_window:
chunks = chunk_text(text, self.sentences_per_window)
extracted_object = None
for chunk in chunks:
raw_text = self._raw_extract(chunk, cls=cls, object=object, show_prompt=show_prompt)
logging.info(f"RAW TEXT: {raw_text}")
next_object = self.parse_completion_payload(
raw_text, cls, object=object # type: ignore
)
if extracted_object is None:
extracted_object = next_object
else:
for k, v in next_object.items():
if isinstance(v, list):
extracted_object[k] += v
else:
if k not in extracted_object:
extracted_object[k] = v
else:
extracted_object[k] = v
else:
raw_text = self._raw_extract(text=text, cls=cls, object=object, show_prompt=show_prompt)
logging.info(f"RAW TEXT: {raw_text}")
extracted_object = self.parse_completion_payload(
raw_text, cls, object=object # type: ignore
)
return ExtractionResult(
input_text=text,
raw_completion_output=raw_text,
prompt=self.last_prompt,
extracted_object=extracted_object,
named_entities=self.named_entities,
)
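    # Typical call pattern (a sketch; the template name is just one of the bundled
    # templates, used here for illustration):
    #
    #   ke = SPIRESEngine(template="gocam.GoCamAnnotations")
    #   result = ke.extract_from_text("text of an abstract ...")
    #   result.extracted_object    # instantiated template class, grounded where possible
    #   result.named_entities      # NamedEntity objects collected during grounding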
def _extract_from_text_to_dict(self, text: str, cls: ClassDefinition = None) -> RESPONSE_DICT:
raw_text = self._raw_extract(text=text, cls=cls)
return self._parse_response_to_dict(raw_text, cls)
def generate_and_extract(
self, entity: str, prompt_template: str = "", show_prompt: bool = False, **kwargs
) -> ExtractionResult:
"""
Generate a description using GPT and then extract from it using SPIRES.
:param entity:
:param kwargs:
:return:
"""
if prompt_template is None:
prompt_template = "Generate a comprehensive description of {entity}.\n"
prompt = prompt_template.format(entity=entity)
if self.client is not None:
payload = self.client.complete(prompt, show_prompt)
else:
payload = ""
return self.extract_from_text(payload, **kwargs)
def iteratively_generate_and_extract(
self,
entity: str,
cache_path: Union[str, Path],
iteration_slots: List[str],
adapter: BasicOntologyInterface = None,
clear=False,
max_iterations=10,
prompt_template=None,
show_prompt: bool = False,
**kwargs,
) -> Iterator[ExtractionResult]:
def _remove_parenthetical_context(s: str):
return re.sub(r"\(.*\)", "", s)
iteration = 0
if isinstance(cache_path, str):
cache_path = Path(cache_path)
if cache_path.exists() and not clear:
db = yaml.safe_load(cache_path.open())
if "entities_in_queue" not in db:
db["entities_in_queue"] = []
else:
db = {"processed_entities": [], "entities_in_queue": [], "results": []}
if entity not in db["processed_entities"]:
db["entities_in_queue"].append(entity)
if prompt_template is None:
prompt_template = (
"Generate a comprehensive description of {entity}. "
+ "The description should include the information on"
+ " and ".join(iteration_slots)
+ ".\n"
)
while db["entities_in_queue"] and iteration < max_iterations:
iteration += 1
next_entity = db["entities_in_queue"].pop(0)
logging.info(f"ITERATION {iteration}, entity={next_entity}")
# check if entity matches a curie pattern using re
if re.match(r"^[A-Z]+:[A-Z0-9]+$", next_entity):
curie = next_entity
next_entity = adapter.label(next_entity)
else:
curie = None
result = self.generate_and_extract(
next_entity, prompt_template=prompt_template, show_prompt=show_prompt, **kwargs
)
if curie:
if result.extracted_object:
result.extracted_object.id = curie
db["results"].append(result)
db["processed_entities"].append(next_entity)
yield result
for s in iteration_slots:
# if s not in result.extracted_object:
# raise ValueError(f"Slot {s} not found in {result.extracted_object}")
vals = getattr(result.extracted_object, s, [])
if not vals:
logging.info("dead-end: no values found for slot")
continue
if not isinstance(vals, list):
vals = [vals]
for val in vals:
entity = val
if result.named_entities is not None:
for ne in result.named_entities:
if ne.id == val:
entity = ne.label
if ne.id.startswith("AUTO"):
                                    # Sometimes the value of some slots will lack
                                    # context on its own, so append the parent
                                    # entity (minus its own parentheses) as context
context = next_entity
context = re.sub(r"\(.*\)", "", context)
entity = f"{entity} ({context})"
else:
entity = ne.id
break
queue_deparenthesized = [
_remove_parenthetical_context(e) for e in db["entities_in_queue"]
]
if (
entity not in db["processed_entities"]
and entity not in db["entities_in_queue"]
and _remove_parenthetical_context(entity) not in queue_deparenthesized
):
db["entities_in_queue"].append(entity)
with open(cache_path, "w") as f:
# TODO: consider a more robust backend e.g. mongo
f.write(dump_minimal_yaml(db))
def generalize(
self,
object: Union[pydantic.BaseModel, dict],
examples: List[EXAMPLE],
show_prompt: bool = False,
) -> ExtractionResult:
"""
Generalize the given examples.
:param object:
:param examples:
:return:
"""
cls = self.template_class
sv = self.schemaview
prompt = "example:\n"
for example in examples:
prompt += f"{self.serialize_object(example)}\n\n"
prompt += "\n\n===\n\n"
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = sv.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
logging.debug(f"PROMPT: {prompt}")
payload = self.client.complete(prompt, show_prompt)
prediction = self.parse_completion_payload(payload, object=object)
return ExtractionResult(
input_text=prompt,
raw_completion_output=payload,
# prompt=self.last_prompt,
results=[prediction],
named_entities=self.named_entities,
)
def map_terms(
self, terms: List[str], ontology: str, show_prompt: bool = False
) -> Dict[str, str]:
"""
Map the given terms to the given ontology.
EXPERIMENTAL
currently GPT-3 does not do so well with this task.
:param terms:
:param ontology:
:return:
"""
# TODO: make a separate config
examples = {
"go": {
"nucleui": "nucleus",
"mitochondrial": "mitochondrion",
"signaling": "signaling pathway",
"cysteine biosynthesis": "cysteine biosynthetic process",
"alcohol dehydrogenase": "alcohol dehydrogenase activity",
},
"uberon": {
"feet": "pes",
"forelimb, left": "left forelimb",
"hippocampus": "Ammons horn",
},
}
ontology = ontology.lower()
if ontology in examples:
example = examples[ontology]
else:
example = examples["uberon"]
prompt = "Normalize the following semicolon separated\
list of terms to the {ontology.upper()} ontology\n\n"
prompt += "For example:\n\n"
for k, v in example.items():
prompt += f"{k}: {v}\n"
prompt += "===\n\nTerms:"
prompt += "; ".join(terms)
prompt += "===\n\n"
payload = self.client.complete(prompt, show_prompt)
# outer parse
best_results: List[str] = []
for sep in ["\n", "; "]:
results = payload.split(sep)
if len(results) > len(best_results):
best_results = results
def normalize(s: str) -> str:
s = s.strip()
s.replace("_", " ")
return s.lower()
mappings = {}
for result in best_results:
if ":" not in result:
logging.error(f"Count not parse result: {result}")
continue
k, v = result.strip().split(":", 1)
k = k.strip()
v = v.strip()
for t in terms:
if normalize(t) == normalize(k):
mappings[t] = v
break
for t in terms:
if t not in mappings:
logging.warning(f"Could not map term: {t}")
return mappings
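    # Example (sketch): map_terms(["feet", "hippocampus"], "uberon") is expected to
    # return something like {"feet": "pes", "hippocampus": "Ammons horn"}, parsed
    # from the colon-separated lines returned by the model.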
def serialize_object(self, example: EXAMPLE, cls: ClassDefinition = None) -> str:
if cls is None:
cls = self.template_class
if isinstance(example, str):
return example
if isinstance(example, pydantic.BaseModel):
example = example.dict()
lines = []
sv = self.schemaview
for k, v in example.items():
if not v:
continue
slot = sv.induced_slot(k, cls.name)
v_serialized = self._serialize_value(v, slot)
lines.append(f"{k}: {v_serialized}")
return "\n".join(lines)
def _serialize_value(self, val: Any, slot: SlotDefinition) -> str:
if val is None:
return ""
if isinstance(val, list):
return "; ".join([self._serialize_value(v, slot) for v in val if v])
if isinstance(val, dict):
return " - ".join([self._serialize_value(v, slot) for v in val.values() if v])
sv = self.schemaview
if slot.range in sv.all_classes():
if self.labelers:
labelers = list(self.labelers)
else:
labelers = []
labelers += self.get_annotators(sv.get_class(slot.range))
if labelers:
for labeler in labelers:
label = labeler.label(val)
if label:
return label
return val
def _raw_extract(
self,
text,
cls: ClassDefinition = None,
object: OBJECT = None,
show_prompt: bool = False,
) -> str:
"""
Extract annotations from the given text.
:param text:
:return:
"""
prompt = self.get_completion_prompt(cls=cls, text=text, object=object)
self.last_prompt = prompt
payload = self.client.complete(prompt=prompt, show_prompt=show_prompt)
return payload
def get_completion_prompt(
self, cls: ClassDefinition = None, text: str = "", object: OBJECT = None
) -> str:
"""Get the prompt for the given template."""
if cls is None:
cls = self.template_class
if not text or ("\n" in text or len(text) > 60):
prompt = (
"From the text below, extract the following entities in the following format:\n\n"
)
else:
prompt = "Split the following piece of text into fields in the following format:\n\n"
for slot in self.schemaview.class_induced_slots(cls.name):
if ANNOTATION_KEY_PROMPT_SKIP in slot.annotations:
continue
if ANNOTATION_KEY_PROMPT in slot.annotations:
slot_prompt = slot.annotations[ANNOTATION_KEY_PROMPT].value
elif slot.description:
slot_prompt = slot.description
else:
if slot.multivalued:
slot_prompt = f"semicolon-separated list of {slot.name}s"
else:
slot_prompt = f"the value for {slot.name}"
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
pvs = [str(k) for k in enum_def.permissible_values.keys()]
slot_prompt += f"Must be one of: {', '.join(pvs)}"
prompt += f"{slot.name}: <{slot_prompt}>\n"
# prompt += "Do not answer if you don't know\n\n"
prompt = f"{prompt}\n\nText:\n{text}\n\n===\n\n"
if object:
if cls is None:
cls = self.template_class
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = self.schemaview.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
return prompt
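    # The generated prompt is a pseudo-YAML request, one line per promptable slot;
    # for a hypothetical template with slots `genes` and `organism` it looks roughly like:
    #
    #   From the text below, extract the following entities in the following format:
    #
    #   genes: <semicolon-separated list of gene names>
    #   organism: <the value for organism>
    #
    #   Text:
    #   <the passage being processed>
    #
    #   ===
    #
    # (each per-slot hint comes from the slot's `prompt` annotation or description
    # when the schema provides one, otherwise from the defaults built above)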
def _parse_response_to_dict(
self, results: str, cls: ClassDefinition = None
) -> Optional[RESPONSE_DICT]:
"""
Parse the pseudo-YAML response from OpenAI into a dictionary object.
E.g.
foo: a; b; c
becomes
{"foo": ["a", "b", "c"]}
:param results:
:return:
"""
lines = results.splitlines()
ann = {}
promptable_slots = self.promptable_slots(cls)
for line in lines:
line = line.strip()
if not line:
continue
if ":" not in line:
if len(promptable_slots) == 1:
slot = promptable_slots[0]
logging.warning(
f"Coercing to YAML-like with key {slot.name}: Original line: {line}"
)
line = f"{slot.name}: {line}"
else:
logging.error(f"Line '{line}' does not contain a colon; ignoring")
return None
r = self._parse_line_to_dict(line, cls)
if r is not None:
field, val = r
ann[field] = val
return ann
def _parse_line_to_dict(
self, line: str, cls: ClassDefinition = None
) -> Optional[Tuple[FIELD, RESPONSE_ATOM]]:
if cls is None:
cls = self.template_class
sv = self.schemaview
# each line is a key-value pair
logging.info(f"PARSING LINE: {line}")
field, val = line.split(":", 1)
        # Field normalization:
        # The LLM may mutate the output format somewhat,
        # randomly pluralizing or replacing spaces with underscores
field = field.lower().replace(" ", "_")
logging.debug(f" FIELD: {field}")
cls_slots = sv.class_slots(cls.name)
slot = None
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
else:
# TODO: check this
if field.endswith("s"):
field = field[:-1]
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
if not slot:
logging.error(f"Cannot find slot for {field} in {line}")
# raise ValueError(f"Cannot find slot for {field} in {line}")
return None
if not val:
msg = f"Empty value in key-value line: {line}"
if slot.required:
raise ValueError(msg)
if slot.recommended:
logging.warning(msg)
return None
inlined = slot.inlined
slot_range = sv.get_class(slot.range)
if not inlined:
if slot.range in sv.all_classes():
inlined = sv.get_identifier_slot(slot_range.name) is None
val = val.strip()
if slot.multivalued:
vals = [v.strip() for v in val.split(";")]
else:
vals = [val]
vals = [val for val in vals if val]
logging.debug(f"SLOT: {slot.name} INL: {inlined} VALS: {vals}")
if inlined:
transformed = False
slots_of_range = sv.class_slots(slot_range.name)
if self.recurse or len(slots_of_range) > 2:
logging.debug(f" RECURSING ON SLOT: {slot.name}, range={slot_range.name}")
vals = [
self._extract_from_text_to_dict(v, slot_range) for v in vals # type: ignore
]
else:
for sep in [" - ", ":", "/", "*", "-"]:
if all([sep in v for v in vals]):
vals = [
dict(zip(slots_of_range, v.split(sep, 1))) for v in vals # type: ignore
]
for v in vals:
for k in v.keys(): # type: ignore
v[k] = v[k].strip() # type: ignore
transformed = True
break
if not transformed:
logging.warning(f"Did not find separator in {vals} for line {line}")
return None
# transform back from list to single value if not multivalued
if slot.multivalued:
final_val = vals
else:
if len(vals) != 1:
logging.error(f"Expected 1 value for {slot.name} in '{line}' but got {vals}")
final_val = vals[0] # type: ignore
return field, final_val
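    # Worked example (sketch): for a multivalued, non-inlined slot named `genes`,
    # the line "genes: tp53; brca1" parses to ("genes", ["tp53", "brca1"]); for an
    # inlined two-slot range with recursion disabled, a value such as
    # "tp53 - kinase activity" is split into the two sub-slots on the first
    # separator found (" - ", ":", "/", "*" or "-").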
def parse_completion_payload(
self, results: str, cls: ClassDefinition = None, object: dict = None
) -> pydantic.BaseModel:
"""
Parse the completion payload into a pydantic class.
:param results:
:param cls:
:param object: stub object
:return:
"""
raw = self._parse_response_to_dict(results, cls)
logging.debug(f"RAW: {raw}")
if object:
raw = {**object, **raw}
self._auto_add_ids(raw, cls)
return self.ground_annotation_object(raw, cls)
def _auto_add_ids(self, ann: RESPONSE_DICT, cls: ClassDefinition = None) -> None:
if ann is None:
return
if cls is None:
cls = self.template_class
for slot in self.schemaview.class_induced_slots(cls.name):
if slot.identifier:
if slot.name not in ann:
auto_id = str(uuid.uuid4())
auto_prefix = self.auto_prefix
if slot.range == "uriorcurie" or slot.range == "uri":
ann[slot.name] = f"{auto_prefix}:{auto_id}"
else:
ann[slot.name] = auto_id
def ground_annotation_object(
self, ann: RESPONSE_DICT, cls: ClassDefinition = None
) -> Optional[pydantic.BaseModel]:
"""Ground the direct parse of the OpenAI payload.
The raw openAI payload is a YAML-like string, which is parsed to
a response dictionary.
This dictionary is then grounded, using this method
:param ann: Raw annotation object
:param cls: schema class the ground object should instantiate
:return: Grounded annotation object
"""
logging.debug(f"Grounding annotation object {ann}")
if cls is None:
cls = self.template_class
sv = self.schemaview
new_ann: Dict[str, Any] = {}
if ann is None:
logging.error(f"Cannot ground None annotation, cls={cls.name}")
return None
for field, vals in ann.items():
if isinstance(vals, list):
multivalued = True
else:
multivalued = False
vals = [vals]
slot = sv.induced_slot(field, cls.name)
rng_cls = sv.get_class(slot.range)
enum_def = None
if slot.range:
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
new_ann[field] = []
logging.debug(f"FIELD: {field} SLOT: {slot.name}")
for val in vals:
if not val:
continue
logging.debug(f" VAL: {val}")
if isinstance(val, tuple):
# special case for pairs
sub_slots = sv.class_induced_slots(rng_cls.name)
obj = {}
for i in range(0, len(val)):
sub_slot = sub_slots[i]
sub_rng = sv.get_class(sub_slot.range)
if not sub_rng:
logging.error(f"Cannot find range for {sub_slot.name}")
result = self.normalize_named_entity(val[i], sub_slot.range)
obj[sub_slot.name] = result
elif isinstance(val, dict):
# recurse
obj = self.ground_annotation_object(val, rng_cls)
else:
obj = self.normalize_named_entity(val, slot.range) # type: ignore
if enum_def:
found = False
logging.info(f"Looking for {obj} in {enum_def.name}")
for k, _pv in enum_def.permissible_values.items():
if type(obj) is str and type(k) is str:
if obj.lower() == k.lower():
obj = k # type: ignore
found = True
break
if not found:
logging.info(f"Cannot find enum value for {obj} in {enum_def.name}")
obj = None
if multivalued:
new_ann[field].append(obj)
else:
new_ann[field] = obj
logging.debug(f"Creating object from dict {new_ann}")
logging.info(new_ann)
py_cls = self.template_module.__dict__[cls.name]
return py_cls(**new_ann)
| [
"Normalize the following semicolon separated list of terms to the {ontology.upper()} ontology\n\n",
"; ",
"===\n\n",
"===\n\nTerms:",
".\n",
"example:\n",
"Generate a comprehensive description of {entity}.\n",
"PLACEHOLDER\n\nText:\nPLACEHOLDER\n\n===\n\n",
" and ",
"From the text below, extract the following entities in the following format:\n\n",
"For example:\n\n",
"PLACEHOLDER: PLACEHOLDER\n",
"\n\n===\n\n",
", ",
"Generate a comprehensive description of {entity}. ",
"The description should include the information on",
"Split the following piece of text into fields in the following format:\n\n"
] |
2024-01-10 | bohuizhang/ontogpt | src~ontogpt~engines~knowledge_engine.py | """Main Knowledge Extractor class."""
import importlib
import logging
import re
from abc import ABC
from dataclasses import dataclass, field
from pathlib import Path
from types import ModuleType
from typing import Dict, Iterator, List, Optional, TextIO, Union
from urllib.parse import quote
import inflection
import openai
import pydantic
import tiktoken
import yaml
from linkml_runtime import SchemaView
from linkml_runtime.linkml_model import ClassDefinition, ElementName, SlotDefinition
from oaklib import BasicOntologyInterface, get_adapter
from oaklib.datamodels.text_annotator import TextAnnotationConfiguration
from oaklib.implementations import OntoPortalImplementationBase
from oaklib.interfaces import MappingProviderInterface, TextAnnotatorInterface
from oaklib.utilities.apikey_manager import get_apikey_value
from oaklib.utilities.subsets.value_set_expander import ValueSetExpander
from ontogpt import DEFAULT_MODEL
from ontogpt.clients import OpenAIClient
from ontogpt.templates.core import ExtractionResult, NamedEntity
this_path = Path(__file__).parent
logger = logging.getLogger(__name__)
OBJECT = Union[str, pydantic.BaseModel, dict]
EXAMPLE = OBJECT
FIELD = str
TEMPLATE_NAME = str
MODEL_NAME = str
# annotation metamodel
ANNOTATION_KEY_PROMPT = "prompt"
ANNOTATION_KEY_PROMPT_SKIP = "prompt.skip"
ANNOTATION_KEY_ANNOTATORS = "annotators"
ANNOTATION_KEY_RECURSE = "ner.recurse"
ANNOTATION_KEY_EXAMPLES = "prompt.examples"
# TODO: introspect
# TODO: move this to its own module
DATAMODELS = [
"bioloigical_process.BiologicalProcess",
"biotic_interaction.BioticInteraction",
"cell_type.CellTypeDocument",
"ctd.ChemicalToDiseaseDocument",
"diagnostic_procedure.DiagnosticProceduretoPhenotypeAssociation",
"drug.DrugMechanism",
"environmental_sample.Study",
"gocam.GoCamAnnotations",
"mendelian_disease.MendelianDisease",
"phenotype.Trait",
"reaction.Reaction",
"recipe.Recipe",
"treatment.DiseaseTreatmentSummary",
]
def chunk_text(text: str, window_size=3) -> Iterator[str]:
"""Chunk text into windows of sentences."""
sentences = re.split(r"[.?!]\s+", text)
for right_index in range(1, len(sentences)):
left_index = max(0, right_index - window_size)
yield ". ".join(sentences[left_index:right_index])
@dataclass
class KnowledgeEngine(ABC):
"""
Abstract base class for all knowledge engines.
A Knowledge Engine is able to extract knowledge from text, utilizing
knowledge sources plus LLMs
"""
template: TEMPLATE_NAME = ""
"""LinkML Template to use for this engine.
Must be of the form <module_name>.<ClassName>"""
template_class: ClassDefinition = None
"""LinkML Class for the template.
This is derived from the template and does not need to be set manually."""
template_pyclass = None
"""Python class for the template.
This is derived from the template and does not need to be set manually."""
template_module: Optional[ModuleType] = None
"""Python module for the template.
This is derived from the template and does not need to be set manually."""
schemaview: SchemaView = None
"""LinkML SchemaView over the template.
This is derived from the template and does not need to be set manually."""
api_key: str = ""
"""OpenAI API key."""
model: MODEL_NAME = ""
"""Language Model. This may be overridden in subclasses."""
# annotator: TextAnnotatorInterface = None
# """Default annotator. TODO: deprecate?"""
annotators: Optional[Dict[str, List[TextAnnotatorInterface]]] = None
"""Annotators for each class.
An annotator will ground/map labels to CURIEs.
These override the annotators annotated in the template
"""
skip_annotators: Optional[List[TextAnnotatorInterface]] = None
"""Annotators to skip.
This overrides any specified in the schema"""
mappers: Optional[List[BasicOntologyInterface]] = None
"""List of concept mappers, to assist in grounding to desired ID prefix"""
labelers: Optional[List[BasicOntologyInterface]] = None
"""Labelers that map CURIEs to labels"""
client: Optional[OpenAIClient] = None
"""All calls to LLMs are delegated through this client"""
dictionary: Dict[str, str] = field(default_factory=dict)
"""Local dictionary of strings/labels to IDs"""
value_set_expansions: Dict[str, List[str]] = field(default_factory=dict)
min_grounding_text_overlap = 0.66
"""Min proportion of overlap in characters between text and grounding. TODO: use tokenization"""
named_entities: List[NamedEntity] = field(default_factory=list)
"""Cache of all named entities"""
auto_prefix: str = ""
"""If set then non-normalized named entities will be mapped to this prefix"""
last_text: str = ""
"""Cache of last text."""
last_prompt: str = ""
"""Cache of last prompt used."""
encoding = None
def __post_init__(self):
if self.template:
self.template_class = self._get_template_class(self.template)
if self.template_class:
logging.info(f"Using template {self.template_class.name}")
if not self.model:
self.model = DEFAULT_MODEL
if self.mappers is None:
logging.info("Using mappers (currently hardcoded)")
self.mappers = [get_adapter("translator:")]
self.set_up_client()
try:
self.encoding = tiktoken.encoding_for_model(self.client.model)
except KeyError:
self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
logger.error(f"Could not find encoding for model {self.client.model}")
def set_api_key(self, key: str):
self.api_key = key
openai.api_key = key
def extract_from_text(
self, text: str, cls: ClassDefinition = None, object: OBJECT = None
) -> ExtractionResult:
raise NotImplementedError
def extract_from_file(self, file: Union[str, Path, TextIO]) -> pydantic.BaseModel:
"""
Extract annotations from the given text.
:param file:
:return:
"""
if isinstance(file, str):
file = Path(file)
if isinstance(file, Path):
with file.open() as f:
text = f.read()
else:
text = file.read()
self.last_text = text
r = self.extract_from_text(text)
r.input_id = str(file)
return r
def load_dictionary(self, path: Union[str, Path, list]):
if not isinstance(path, list):
logger.info(f"Loading dictionary from {path}")
with open(str(path)) as f:
return self.load_dictionary(yaml.safe_load(f))
if self.dictionary is None:
self.dictionary = {}
entries = [(entry["synonym"].lower(), entry["id"]) for entry in path]
entries = sorted(entries, key=lambda x: len(x[0]), reverse=True)
for syn, id in entries:
if syn in self.dictionary and self.dictionary[syn] != id:
logger.warning(f"Duplicate synonym: {syn} => {id}, {self.dictionary[syn]}")
self.dictionary[syn] = id
logger.info(f"Loaded {len(self.dictionary)}")
# @abstractmethod
def synthesize(self, cls: ClassDefinition = None, object: OBJECT = None) -> ExtractionResult:
raise NotImplementedError
def generalize(
self, object: Union[pydantic.BaseModel, dict], examples: List[EXAMPLE], show_prompt: bool
) -> ExtractionResult:
raise NotImplementedError
def map_terms(self, terms: List[str], ontology: str, show_prompt: bool) -> Dict[str, str]:
raise NotImplementedError
def _get_template_class(self, template: TEMPLATE_NAME) -> ClassDefinition:
"""
Get the LinkML class for a template.
:param template: template name of the form module.ClassName
:return: LinkML class definition
"""
logger.info(f"Loading schema for {template}")
if "." in template:
module_name, class_name = template.split(".", 1)
else:
module_name = template
class_name = None
templates_path = this_path.parent / "templates"
path_to_template = str(templates_path / f"{module_name}.yaml")
sv = SchemaView(path_to_template)
if class_name is None:
roots = [c.name for c in sv.all_classes().values() if c.tree_root]
if len(roots) != 1:
raise ValueError(f"Template {template} does not have singular root: {roots}")
class_name = roots[0]
mod = importlib.import_module(f"ontogpt.templates.{module_name}")
self.template_module = mod
self.template_pyclass = mod.__dict__[class_name]
self.schemaview = sv
logger.info(f"Getting class for template {template}")
cls = None
for c in sv.all_classes().values():
if c.name == class_name:
cls = c
break
if not cls:
raise ValueError(f"Template {template} not found")
return cls
def _get_openai_api_key(self):
"""Get the OpenAI API key from the environment."""
# return os.environ.get("OPENAI_API_KEY")
return get_apikey_value("openai")
def get_annotators(self, cls: ClassDefinition = None) -> List[BasicOntologyInterface]:
"""
Get the annotators/labelers for a class.
The annotators are returned in order of precedence
Annotators are used to *ground* labels as CURIEs.
Annotators may also do double-duty as labelers (i.e. map CURIEs to labels)
These are specified by linkml annotations within the template/schema;
if the engine has a set of annotators specified these take precedence.
:param cls: schema class
:return: list of annotations
"""
if self.annotators and cls.name in self.annotators:
annotators = self.annotators[cls.name]
else:
if ANNOTATION_KEY_ANNOTATORS not in cls.annotations:
logger.error(f"No annotators for {cls.name}")
return []
annotators = cls.annotations[ANNOTATION_KEY_ANNOTATORS].value.split(", ")
logger.info(f" Annotators: {annotators} [will skip: {self.skip_annotators}]")
annotators = []
for annotator in annotators:
if isinstance(annotator, str):
logger.info(f"Loading annotator {annotator}")
if self.skip_annotators and annotator in self.skip_annotators:
logger.info(f"Skipping annotator {annotator}")
continue
if annotator not in self.annotators:
self.annotators[annotator] = get_adapter(annotator)
annotators.append(self.annotators[annotator])
elif isinstance(annotator, BasicOntologyInterface):
annotators.append(annotator)
else:
raise ValueError(f"Unknown annotator type {annotator}")
return annotators
def promptable_slots(self, cls: Optional[ClassDefinition] = None) -> List[SlotDefinition]:
"""
List of all slots that are not skipped for purposes of prompting.
Examples of slots that are skipped are:
- identifier fields
- the source text used in extraction
- other metadata that is outside what we might want to predict
:param cls:
:return:
"""
if cls is None:
cls = self.template_class
sv = self.schemaview
return [s for s in sv.class_induced_slots(cls.name) if not self.slot_is_skipped(s)]
def slot_is_skipped(self, slot: SlotDefinition) -> bool:
if ANNOTATION_KEY_PROMPT_SKIP in slot.annotations:
return True
else:
return False
def normalize_named_entity(self, text: str, range: ElementName) -> str:
"""
Grounds and normalizes to preferred ID prefixes.
if the entity cannot be grounded and normalized, the original text is returned.
:param text:
:param cls:
:return:
"""
sv = self.schemaview
cls = sv.get_class(range)
if cls is None:
return text
if ANNOTATION_KEY_EXAMPLES in cls.annotations:
examples = cls.annotations[ANNOTATION_KEY_EXAMPLES].value.split(", ")
examples = [x.lower() for x in examples]
logger.debug(f"Will exclude if in list of examples: {examples}")
if text.lower() in examples:
logger.warning(f"Likely a hallucination as it is the example set: {text}")
return f"LIKELY HALLUCINATION: {text}"
for obj_id in self.groundings(text, cls):
logger.info(f"Grounding {text} to {obj_id}; next step is to normalize")
for normalized_id in self.normalize_identifier(obj_id, cls):
if not any(e for e in self.named_entities if e.id == normalized_id):
self.named_entities.append(NamedEntity(id=normalized_id, label=text))
logger.info(f"Normalized {text} with {obj_id} to {normalized_id}")
return normalized_id
logger.info(f"Could not ground and normalize {text} to {cls.name}")
if self.auto_prefix:
obj_id = f"{self.auto_prefix}:{quote(text)}"
if not any(e for e in self.named_entities if e.id == obj_id):
self.named_entities.append(NamedEntity(id=obj_id, label=text))
else:
obj_id = text
if ANNOTATION_KEY_RECURSE in cls.annotations:
logger.info(f"Using recursive strategy to parse: {text} to {cls.name}")
obj = self.extract_from_text(text, cls).extracted_object
if obj:
if self.named_entities is None:
self.named_entities = []
try:
obj.id = obj_id
except ValueError as e:
logger.error(f"No id for {obj} {e}")
self.named_entities.append(obj)
return obj_id
def is_valid_identifier(self, input_id: str, cls: ClassDefinition) -> bool:
sv = self.schemaview
if cls.id_prefixes:
if ":" not in input_id:
return False
prefix, rest = input_id.split(":", 1)
if prefix not in cls.id_prefixes:
logger.debug(f"ID {input_id} not in prefixes {cls.id_prefixes}")
return False
id_slot = sv.get_identifier_slot(cls.name)
if id_slot and id_slot.pattern:
id_regex = re.compile(id_slot.pattern)
m = re.match(id_regex, input_id)
if not m:
logger.debug(f"ID {input_id} does not match pattern {id_slot.pattern}")
return False
if id_slot and id_slot.values_from:
vse = ValueSetExpander()
is_found = False
for e in id_slot.values_from:
if e not in self.value_set_expansions:
# expanding value set for first time
range_enum = sv.get_enum(e)
pvs = vse.expand_value_set(range_enum, sv.schema)
valid_ids = [pv.text for pv in pvs]
self.value_set_expansions[e] = valid_ids
logger.info(f"Expanded {e} to {len(valid_ids)} IDs")
if input_id in self.value_set_expansions[e]:
is_found = True
logger.info(f"ID {input_id} found in value set {e}")
break
if not is_found:
logger.info(f"ID {input_id} not in value set {e}")
return False
return True
def normalize_identifier(self, input_id: str, cls: ClassDefinition) -> Iterator[str]:
if self.is_valid_identifier(input_id, cls):
yield input_id
for obj_id in self.map_identifier(input_id, cls):
if obj_id == input_id:
continue
if self.is_valid_identifier(obj_id, cls):
yield obj_id
def map_identifier(self, input_id: str, cls: ClassDefinition) -> Iterator[str]:
"""
Normalize an identifier to a preferred prefix.
:param input_id:
:param cls:
:return:
"""
if input_id.startswith("http://purl.bioontology.org/ontology"):
# TODO: this should be fixed upstream in OAK
logging.info(f"Normalizing BioPortal id {input_id}")
input_id = input_id.replace("http://purl.bioontology.org/ontology/", "").replace(
"/", ":"
)
if input_id.startswith("http://id.nlm.nih.gov/mesh/"):
# TODO: this should be fixed upstream in OAK
logging.info(f"Normalizing MESH id {input_id}")
input_id = input_id.replace("http://id.nlm.nih.gov/mesh/", "").replace("/", ":")
if input_id.startswith("drugbank:"):
input_id = input_id.replace("drugbank:", "DRUGBANK:")
yield input_id
if not cls.id_prefixes:
return
if not self.mappers:
return
for mapper in self.mappers:
if isinstance(mapper, MappingProviderInterface):
for mapping in mapper.sssom_mappings([input_id]):
yield str(mapping.object_id)
else:
raise ValueError(f"Unknown mapper type {mapper}")
def groundings(self, text: str, cls: ClassDefinition) -> Iterator[str]:
"""
Ground the given text to element identifiers.
This can potentially yield multiple possible alternatives; these
should be yielded in priority order.
- if there is a different singular form of the text, yield from that first
- dictionary exact matches are yielded first
- dictionary partial matches are yielded next
- annotators are yielded next, in order in which they are specified in the schema
:param text: text to ground, e.g. gene symbol
:param cls: schema class the ground object should instantiate
:return:
"""
logger.info(f"GROUNDING {text} using {cls.name}")
id_matches = re.match(r"^(\S+):(\d+)$", text)
if id_matches:
obj_prefix = id_matches.group(1)
matching_prefixes = [x for x in cls.id_prefixes if x.upper() == obj_prefix.upper()]
if matching_prefixes:
yield matching_prefixes[0] + ":" + id_matches.group(2)
text_lower = text.lower()
text_singularized = inflection.singularize(text_lower)
if text_singularized != text_lower:
logger.info(f"Singularized {text} to {text_singularized}")
yield from self.groundings(text_singularized, cls)
paren_char = "["
parenthetical_components = re.findall(r"\[(.*?)\]", text_lower)
if not parenthetical_components:
paren_char = "("
parenthetical_components = re.findall(r"\((.*?)\)", text_lower)
if parenthetical_components:
logger.info(f"{text_lower} =>paren=> {parenthetical_components}")
trimmed_text = text_lower
for component in parenthetical_components:
if component:
logger.debug(
f"RECURSIVE GROUNDING OF {component} from {parenthetical_components}"
)
yield from self.groundings(component, cls)
if paren_char == "(":
trimmed_text = trimmed_text.replace(f"({component})", "")
elif paren_char == "[":
trimmed_text = trimmed_text.replace(f"[{component}]", "")
else:
raise AssertionError(f"Unknown paren char {paren_char}")
trimmed_text = trimmed_text.strip().replace(" ", " ")
if trimmed_text:
if len(trimmed_text) >= len(text_lower):
raise AssertionError(
f"Trimmed text {trimmed_text} is not shorter than {text_lower}"
)
logger.debug(
f"{text_lower} =>trimmed=> {trimmed_text}; in {parenthetical_components}"
)
yield from self.groundings(trimmed_text, cls)
if self.dictionary and text_lower in self.dictionary:
obj_id = self.dictionary[text_lower]
logger.debug(f"Found {text} in dictionary: {obj_id}")
yield obj_id
if self.dictionary:
for syn, obj_id in self.dictionary.items():
if syn in text_lower:
if len(syn) / len(text_lower) > self.min_grounding_text_overlap:
logger.debug(f"Found {syn} < {text} in dictionary: {obj_id}")
yield obj_id
if self.annotators and cls.name in self.annotators:
annotators = self.annotators[cls.name]
else:
if ANNOTATION_KEY_ANNOTATORS not in cls.annotations:
annotators = []
else:
annotators = cls.annotations[ANNOTATION_KEY_ANNOTATORS].value.split(", ")
logger.info(f" Annotators: {annotators} [will skip: {self.skip_annotators}]")
# prioritize whole matches by running these first
for matches_whole_text in [True, False]:
config = TextAnnotationConfiguration(matches_whole_text=matches_whole_text)
for annotator in annotators:
if isinstance(annotator, str):
if self.skip_annotators and annotator in self.skip_annotators:
continue
if self.annotators is None:
self.annotators = {}
if annotator not in self.annotators:
logger.info(f"Loading annotator {annotator}")
self.annotators[annotator] = get_adapter(annotator)
annotator = self.annotators[annotator]
if not matches_whole_text and not isinstance(
annotator, OntoPortalImplementationBase
):
# TODO: allow more fine-grained control
logger.info(
f"Skipping {type(annotator)} as it does not support partial matches"
)
continue
try:
results = annotator.annotate_text(text, config)
for result in results:
yield result.object_id
except Exception as e:
logger.error(f"Error with {annotator} for {text}: {e}")
# def ground_text_to_id(self, text: str, cls: ClassDefinition = None) -> str:
# raise NotImplementedError
def merge_resultsets(
self, resultset: List[ExtractionResult], unique_fields: List[str]
) -> ExtractionResult:
"""
Merge all resultsets into a single resultset.
Note the first element of the list is mutated.
:param resultset:
:return:
"""
result = resultset[0].extracted_object
for next_extraction in resultset[1:]:
next_result = next_extraction.extracted_object
if unique_fields:
for k in unique_fields:
if k in result and k in next_result:
if result[k] != next_result[k]:
logger.error(
f"Cannot merge unique fields: {k} {result[k]} != {next_result[k]}"
)
continue
for k, v in vars(next_result).items():
curr_v = getattr(result, k, None)
if isinstance(v, list):
if all(isinstance(x, str) for x in v):
setattr(result, k, list(set(curr_v).union(set(v))))
else:
setattr(result, k, curr_v + v)
else:
if curr_v and v and curr_v != v:
logger.error(f"Cannot merge {curr_v} and {v}")
if v:
setattr(result, k, v)
return resultset[0]
def set_up_client(self):
self.client = OpenAIClient(model=self.model)
logging.info("Setting up OpenAI client API Key")
self.api_key = self._get_openai_api_key()
openai.api_key = self.api_key
| [
"<class 'str'>",
"prompt.skip",
"prompt",
"PLACEHOLDER.yaml",
"None"
] |
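A minimal standalone sketch of the URI-to-CURIE prefix rewriting performed in map_identifier above; the helper name to_curie and the example identifier are illustrative, not part of the original module.

def to_curie(input_id: str) -> str:
    # BioPortal-style URIs become prefix:local CURIEs
    if input_id.startswith("http://purl.bioontology.org/ontology/"):
        input_id = input_id.replace("http://purl.bioontology.org/ontology/", "").replace("/", ":")
    # MESH URIs are stripped the same way
    if input_id.startswith("http://id.nlm.nih.gov/mesh/"):
        input_id = input_id.replace("http://id.nlm.nih.gov/mesh/", "").replace("/", ":")
    # drugbank CURIEs get an upper-cased prefix
    if input_id.startswith("drugbank:"):
        input_id = input_id.replace("drugbank:", "DRUGBANK:")
    return input_id

# to_curie("http://purl.bioontology.org/ontology/MESH/D014867") -> "MESH:D014867"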
2024-01-10 | PhilippThoelke/goofi-pipe | src~goofi~nodes~inputs~imagegeneration.py | from os.path import join
from typing import Any, Dict, Tuple
import cv2
import numpy as np
from goofi.data import Data, DataType
from goofi.node import Node
from goofi.params import BoolParam, FloatParam, IntParam, StringParam
class ImageGeneration(Node):
def config_params():
return {
"image_generation": {
"model_id": StringParam("dall-e-3", options=["stabilityai/stable-diffusion-2-1", "dall-e-2", "dall-e-3"]),
"openai_key": StringParam("openai.key"),
"inference_steps": IntParam(50, 5, 100),
"guidance_scale": FloatParam(7.5, 0.1, 20),
"use_fixers": BoolParam(True, doc="Use textural inversion (nfixer and nrealfixer) for better image quality."),
"seed": IntParam(-1, -1, 1000000, doc="-1 for random seed"),
"width": IntParam(512, 100, 1024),
"height": IntParam(512, 100, 1024),
"scheduler": StringParam(
list(SCHEDULER_MAPPING.keys())[0],
options=list(SCHEDULER_MAPPING.keys()),
),
"device": "cuda",
},
"img2img": {
"enabled": False,
"strength": FloatParam(0.8, 0, 1),
"resize_input": True,
"reset_image": BoolParam(False, trigger=True),
},
}
def config_input_slots():
return {"prompt": DataType.STRING, "negative_prompt": DataType.STRING, "base_image": DataType.ARRAY}
def config_output_slots():
return {"img": DataType.ARRAY}
def setup(self):
if self.params.image_generation.model_id.value == "stabilityai/stable-diffusion-2-1":
self.torch, self.diffusers = import_libs("stabilityai/stable-diffusion-2-1")
# load StableDiffusion model
if self.params.img2img.enabled.value:
# TODO: make sure this works without internet access
self.sd_pipe = self.diffusers.StableDiffusionImg2ImgPipeline.from_pretrained(
self.params.image_generation.model_id.value, torch_dtype=self.torch.float16
)
else:
# TODO: make sure this works without internet access
self.sd_pipe = self.diffusers.StableDiffusionPipeline.from_pretrained(
self.params.image_generation.model_id.value, torch_dtype=self.torch.float16
)
# set device
self.sd_pipe.to(self.params.image_generation.device.value)
# initialize scheduler
self.image_generation_scheduler_changed(self.params.image_generation.scheduler.value)
# load textual inversions
if self.params.image_generation.use_fixers.value:
self.sd_pipe.load_textual_inversion(join(self.assets_path, "nfixer.pt"))
self.sd_pipe.load_textual_inversion(join(self.assets_path, "nrealfixer.pt"))
# initialize last image
if not hasattr(self, "last_img"):
self.last_img = None
self.reset_last_img()
elif self.params.image_generation.model_id.value in ["dall-e-2", "dall-e-3"]:
self.base64, self.openai = import_libs("dall-e")
# load Dall-E model
if self.params.img2img.enabled.value:
self.dalle_pipe = self.openai.OpenAI().images.edit
else:
self.dalle_pipe = self.openai.OpenAI().images.generate
# initialize last image
if not hasattr(self, "last_img"):
self.last_img = None
self.reset_last_img()
else:
raise ValueError(f"Unknown model: {self.params.image_generation.model_id.value}")
def process(self, prompt: Data, negative_prompt: Data, base_image: Data) -> Dict[str, Tuple[np.ndarray, Dict[str, Any]]]:
if prompt is None:
return None
prompt = prompt.data
if self.params.img2img.reset_image.value:
# reset the last image to zeros
self.reset_last_img()
if self.params.img2img.enabled.value:
# update the last image
if base_image is None:
base_image = self.last_img
else:
base_image = base_image.data
# resize
if self.params.img2img.resize_input.value:
# resize input image to match the last image
base_image = cv2.resize(
base_image, (self.params.image_generation.width.value, self.params.image_generation.height.value)
)
if base_image.ndim == 3:
# add batch dimension
base_image = np.expand_dims(base_image, 0)
# convert to uint8
base_image = (base_image * 255).astype(np.uint8)
base_image = cv2.cvtColor(base_image, cv2.COLOR_RGB2RGBA)
# convert to bytes
base_image = cv2.imencode(".png", base_image)[1].tobytes()
# remote pipes
if self.params.image_generation.model_id.value in ["dall-e-3", "dall-e-2"]:
size = f"{self.params.image_generation.width.value}x{self.params.image_generation.height.value}"
if self.params.img2img.enabled.value:
# raise error because img2img is not working yet
raise NotImplementedError("img2img is not working yet.")
# run the Dall-E img2img pipeline
response = self.dalle_pipe(
image=base_image,
prompt=prompt,
n=1,
size=size,
response_format="b64_json",
)
else:
try:
# run the Dall-E txt2img pipeline
response = self.dalle_pipe(model=self.params.image_generation.model_id.value,
prompt=prompt,
n=1,
size=size,
quality="standard",
response_format="b64_json",)
except self.openai.BadRequestError as e:
if e.response.status_code == 400:
print(f"Error code 400: the requested image size is not supported by the model."
f"\nYour Model: {self.params.image_generation.model_id.value}"
f"\n1024x1024 is the minimum for DALL-E 3")
# re-raise so the code below never reads an undefined response
raise
img = response.data[0].b64_json
# Decode base64 to bytes
decoded_bytes = self.base64.b64decode(img)
# Convert bytes to numpy array using OpenCV
img_array = cv2.imdecode(np.frombuffer(decoded_bytes, np.uint8), cv2.IMREAD_COLOR)
img_array = img_array.astype(np.float32) / 255.0
# Ensure correct shape
if img_array.shape != (self.params.image_generation.width.value, self.params.image_generation.height.value, 3):
img_array = cv2.resize(
img_array, (self.params.image_generation.width.value, self.params.image_generation.height.value)
)
# Add batch dimension
img = np.expand_dims(img_array, 0)
# save last image
self.last_img = np.array(img[0])
return {
"img": (img, {"prompt": prompt, "negative_prompt": negative_prompt if negative_prompt is not None else None})
}
# local pipes
elif self.params.image_generation.model_id.value == "stabilityai/stable-diffusion-2-1":
# set seed
if self.params.image_generation.seed.value != -1:
self.torch.manual_seed(self.params.image_generation.seed.value)
# add textual inversions to the negative prompt
negative_prompt = negative_prompt.data if negative_prompt is not None else ""
if self.params.image_generation.use_fixers.value:
negative_prompt = " ".join([negative_prompt, "nfixer nrealfixer"])
with self.torch.inference_mode():
if self.params.img2img.enabled.value:
if base_image is None:
base_image = self.last_img
else:
base_image = base_image.data
# run the img2img stable diffusion pipeline
img, _ = self.sd_pipe(
image=base_image,
strength=self.params.img2img.strength.value,
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=self.params.image_generation.inference_steps.value,
guidance_scale=self.params.image_generation.guidance_scale.value,
return_dict=False,
output_type="np",
)
else:
if base_image is not None:
raise ValueError(
"base_image is not supported in text2img mode. Enable img2img or disconnect base_image."
)
# run the text2img stable diffusion pipeline
img, _ = self.sd_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
width=self.params.image_generation.width.value,
height=self.params.image_generation.height.value,
num_inference_steps=self.params.image_generation.inference_steps.value,
guidance_scale=self.params.image_generation.guidance_scale.value,
return_dict=False,
output_type="np",
)
# remove the batch dimension
img = np.array(img[0])
# save last image
self.last_img = img
return {
"img": (
img,
{"prompt": prompt, "negative_prompt": negative_prompt if negative_prompt is not None else None},
)
}
def reset_last_img(self):
"""Reset the last image."""
self.last_img = np.zeros((self.params.image_generation.height.value, self.params.image_generation.width.value, 3))
def image_generation_use_fixers_changed(self, value):
"""Load the textural inversions."""
if value:
# load the textual inversions
self.sd_pipe.load_textual_inversion(join(self.assets_path, "nfixer.pt"))
self.sd_pipe.load_textual_inversion(join(self.assets_path, "nrealfixer.pt"))
else:
# reload the model without the textual inversions
self.setup()
def image_generation_scheduler_changed(self, value):
"""Change the scheduler of the Stable Diffusion pipeline."""
scheduler_settings = dict(SCHEDULER_MAPPING[value])
scheduler_type = scheduler_settings.pop("_sched")
self.sd_pipe.scheduler = getattr(self.diffusers, scheduler_type).from_config(
self.sd_pipe.scheduler.config, **scheduler_settings
)
def image_generation_width_changed(self, value):
"""Resize the last image to match the new width (for img2img)."""
self.last_img = cv2.resize(self.last_img, (value, self.params.image_generation.height.value))
def image_generation_height_changed(self, value):
"""Resize the last image to match the new height (fog img2img)."""
self.last_img = cv2.resize(self.last_img, (self.params.image_generation.width.value, value))
def img2img_enabled_changed(self, value):
"""Load the correct Stable Diffusion pipeline."""
self.setup()
def import_libs(checks):
if checks == "stabilityai/stable-diffusion-2-1":
try:
import torch
except ImportError:
raise ImportError(
"You need to install torch to use the ImageGeneration node with Stable Diffusion: "
"https://pytorch.org/get-started/locally/"
)
try:
import diffusers
except ImportError:
raise ImportError(
"You need to install diffusers to use the ImageGeneration node with Stable Diffusion: pip install diffusers"
)
return torch, diffusers
elif checks == "dall-e":
try:
import openai
except ImportError:
raise ImportError("You need to install openai to use the ImageGeneration node with Dall-E: pip install openai")
try:
import base64
except ImportError:
raise ImportError("You need to import base64 to use the ImageGeneration node with Dall-E")
return base64, openai
else:
raise ValueError(f"Unknown model: {checks}")
SCHEDULER_MAPPING = {
"DPM++ 2M": dict(_sched="DPMSolverMultistepScheduler"),
"DPM++ 2M Karras": dict(_sched="DPMSolverMultistepScheduler", use_karras_sigmas=True),
"DPM++ 2M SDE": dict(_sched="DPMSolverMultistepScheduler", algorithm_type="sde-dpmsolver++"),
"DPM++ 2M SDE Karras": dict(_sched="DPMSolverMultistepScheduler", use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"),
"DPM++ SDE": dict(_sched="DPMSolverSinglestepScheduler"),
"DPM++ SDE Karras": dict(_sched="DPMSolverSinglestepScheduler", use_karras_sigmas=True),
"DPM2": dict(_sched="KDPM2DiscreteScheduler"),
"DPM2 Karras": dict(_sched="KDPM2DiscreteScheduler", use_karras_sigmas=True),
"DPM2 a": dict(_sched="KDPM2AncestralDiscreteScheduler"),
"DPM2 a Karras": dict(_sched="KDPM2AncestralDiscreteScheduler", use_karras_sigmas=True),
"Euler": dict(_sched="EulerDiscreteScheduler"),
"Euler a": dict(_sched="EulerAncestralDiscreteScheduler"),
"Heun": dict(_sched="HeunDiscreteScheduler"),
"LMS": dict(_sched="LMSDiscreteScheduler"),
"LMS Karras": dict(_sched="LMSDiscreteScheduler", use_karras_sigmas=True),
}
| [
"nfixer nrealfixer",
" "
] |
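A hedged sketch of how an entry in SCHEDULER_MAPPING above is resolved into a diffusers scheduler instance, mirroring image_generation_scheduler_changed; pipe is assumed to be an already-loaded StableDiffusionPipeline.

import diffusers

def set_scheduler(pipe, name: str) -> None:
    settings = dict(SCHEDULER_MAPPING[name])                    # copy so pop() does not mutate the table
    scheduler_cls = getattr(diffusers, settings.pop("_sched"))  # e.g. DPMSolverMultistepScheduler
    # the remaining keys (use_karras_sigmas, algorithm_type, ...) are passed as config overrides
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config, **settings)

# set_scheduler(sd_pipe, "DPM++ 2M Karras")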
2024-01-10 | PhilippThoelke/goofi-pipe | src~goofi~nodes~analysis~img2txt.py | import openai
import threading
from goofi.node import Node
from goofi.params import FloatParam, IntParam, StringParam
from goofi.data import Data, DataType
import base64
import requests
from PIL import Image
import io
class Img2Txt(Node):
def config_input_slots():
return {"image": DataType.ARRAY}
def config_output_slots():
return {"generated_text": DataType.STRING}
def config_params():
return {
"img_to_text": {
"openai_key": StringParam("openai.key"),
"model": StringParam("gpt-4-vision-preview"),
"max_tokens": IntParam(300, 10, 1024),
"question": StringParam("What’s in this image?"),
}
}
def setup(self):
key = self.params["img_to_text"]["openai_key"].value
with open(key, "r") as f:
openai.api_key = f.read().strip()
def encode_image(self, image_array):
if image_array.dtype != 'uint8':
# Normalize the array to 0-255 and convert to uint8
image_array = (255 * (image_array - image_array.min()) / (image_array.max() - image_array.min())).astype('uint8')
# Convert the NumPy array to a PIL Image
image = Image.fromarray(image_array)
buffered = io.BytesIO()
# Save the image as JPEG to the buffer
image.save(buffered, format="JPEG")
buffered.seek(0)
# Encode the buffered image to base64
return base64.b64encode(buffered.read()).decode('utf-8')
def process(self, image: Data):
if image.data is None:
return None
# Assuming image.data is a NumPy array representing an image
model = self.params["img_to_text"]["model"].value
max_tokens = self.params["img_to_text"]["max_tokens"].value
base64_image = self.encode_image(image.data)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai.api_key}"
}
question = self.params["img_to_text"]["question"].value
payload = {
"model": model,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": question
},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{base64_image}"
}
]
}
],
"max_tokens": max_tokens
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
print("STATUS", response.status_code)
print("RESPONSE", response.json())
if response.status_code == 200:
generated_text = response.json().get('choices', [{}])[0].get('message', {}).get('content', '')
return {"generated_text": (generated_text, {})}
else:
return {"generated_text": ('Error In Generating Text', {})}
| [
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}]"
] |
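A hedged note on the payload built above: the currently documented Chat Completions vision format wraps the data URL in an object rather than passing a bare string, i.e.

{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}

The bare-string form used in the node may still be accepted by some endpoints, but the object form is the documented one.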
2024-01-10 | PhilippThoelke/goofi-pipe | src~goofi~nodes~analysis~speechsynthesis.py | import openai
import io
import soundfile as sf
from goofi.node import Node
import numpy as np
from goofi.params import StringParam, FloatParam
from goofi.data import Data, DataType
import librosa
class SpeechSynthesis(Node):
def config_input_slots():
return {"text": DataType.STRING, "voice": DataType.ARRAY}
def config_output_slots():
return {"speech": DataType.ARRAY, "transcript": DataType.STRING}
def config_params():
return {
"speech_generation": {
"openai_key": StringParam("openai.key"),
"speed": FloatParam(0.25, 0.1, 2)
}
}
def setup(self):
key = self.params["speech_generation"]["openai_key"].value
with open(key, "r") as f:
openai.api_key = f.read().strip()
def process(self, text: Data, voice: Data,):
speech = None
transcript = ''
if text is None and voice is None:
return {"speech": (np.array([]), {}), "transcript": ("", {})}
if text is not None:
audio_generator = self.synthesize_speech_stream(text.data, self.params["speech_generation"]["speed"].value)
speech = self.bytes_to_array(b''.join([speech_bytes for speech_bytes in audio_generator])) # Convert bytes to numpy array
elif voice is not None:
transcript = self.transcribe_voice(voice.data) or "" # Ensure transcript is a string
return {"speech": (speech, {}), "transcript": (transcript, {})}
def synthesize_speech_stream(self, text, speed):
response = openai.audio.speech.create(
model="tts-1",
voice="alloy",
input=text,
speed=speed
)
# This would be a generator yielding audio chunks
for chunk in response.iter_bytes():
yield chunk
def bytes_to_array(self, audio_bytes):
with io.BytesIO(audio_bytes) as audio_file:
with sf.SoundFile(audio_file) as sf_file:
audio_array = np.array(sf_file.read(dtype='float32'))
# Resample if the sample rate is different from 44100 Hz
if sf_file.samplerate != 44100:
audio_array = self.resample_audio(audio_array, sf_file.samplerate, 44100)
return audio_array
def resample_audio(self, audio, input_rate, output_rate):
# Resample audio from input_rate to output_rate
return librosa.resample(audio, orig_sr=input_rate, target_sr=output_rate)
def transcribe_voice(self, voice_buffer):
# Convert the numpy array buffer to a WAV file in memory
with io.BytesIO() as audio_file:
sf.write(audio_file, voice_buffer, 44100, format='wav')
audio_file.seek(0)
# Send the audio file to OpenAI for transcription
# note: the openai v1 SDK exposes this as lowercase openai.audio, and the result carries a .text field
response = openai.audio.transcriptions.create(
model="whisper-1",
file=audio_file
)
return response.text
| [] |
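A self-contained sketch of the transcription round-trip used above, written against the openai v1 client; giving the in-memory WAV a name attribute so the SDK can infer the upload's file type is an added assumption, not something the original node does.

import io

import numpy as np
import soundfile as sf
from openai import OpenAI

def transcribe(voice_buffer: np.ndarray, sample_rate: int = 44100) -> str:
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    buf = io.BytesIO()
    sf.write(buf, voice_buffer, sample_rate, format="wav")  # numpy buffer -> WAV bytes
    buf.seek(0)
    buf.name = "voice.wav"  # assumed: lets the client infer the file type
    result = client.audio.transcriptions.create(model="whisper-1", file=buf)
    return result.text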
2024-01-10 | PhilippThoelke/goofi-pipe | src~goofi~nodes~inputs~textgeneration.py | import openai
import threading
from goofi.node import Node
from goofi.params import FloatParam, IntParam, StringParam
from goofi.data import Data, DataType
class TextGeneration(Node):
def config_input_slots():
return {"prompt": DataType.STRING}
def config_output_slots():
return {"generated_text": DataType.STRING}
def config_params():
return {
"text_generation": {
"openai_key": StringParam("openai.key"),
"model": StringParam("gpt-3.5-turbo"),
"temperature": FloatParam(1.2, 0.0, 2.0),
"max_tokens": IntParam(128, 10, 1024),
"keep_conversation": False, # Adding new parameter
}
}
def setup(self):
key = self.params["text_generation"]["openai_key"].value
with open(key, "r") as f:
openai.api_key = f.read().strip()
self.client = openai.OpenAI()
self.messages = [] # Initialize conversation history
def process(self, prompt: Data):
if prompt.data is None:
return None
model = self.params["text_generation"]["model"].value
temperature = self.params["text_generation"]["temperature"].value
max_tokens = self.params["text_generation"]["max_tokens"].value
keep_conversation = self.params["text_generation"]["keep_conversation"].value
prompt_ = prompt.data
# Modify the process according to the value of keep_conversation
if keep_conversation:
# Here you would need to manage previous messages to keep the conversation.
user_message = {"role": "user", "content": prompt_}
self.messages.append(user_message)
else:
self.messages = [{"role": "user", "content": prompt_}] # Reset conversation history
response = self.client.chat.completions.create(
model=model,
messages=self.messages,
temperature=temperature,
max_tokens=max_tokens,
)
generated_text = response.choices[0].message.content
if keep_conversation:
# Store the latest response to maintain conversation
assistant_message = {"role": "assistant", "content": generated_text}
self.messages.append(assistant_message)
return {"generated_text": (generated_text, prompt.meta)}
| [] |
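For clarity, the shape self.messages takes after two turns when keep_conversation is enabled (illustrative contents only):

messages = [
    {"role": "user", "content": "first prompt"},
    {"role": "assistant", "content": "first reply"},
    {"role": "user", "content": "second prompt"},
]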
2024-01-10 | PhilippThoelke/goofi-pipe | src~goofi~nodes~analysis~connectivity.py | import numpy as np
from scipy.stats import pearsonr
from sklearn.feature_selection import mutual_info_regression
from goofi.data import Data, DataType
from goofi.node import Node
from goofi.params import FloatParam, IntParam, StringParam, BoolParam
class Connectivity(Node):
def config_input_slots():
return {"data": DataType.ARRAY}
def config_output_slots():
return {
"matrix": DataType.ARRAY,
}
def config_params():
return {
"classical": {
"method": StringParam(
"wPLI",
options=["coherence", "imag_coherence", "wPLI", "PLI", "PLV", "covariance", "pearson", "mutual_info"],
),
},
"biotuner": {
"method": StringParam(
"None", options=["None", "harmsim", "euler", "subharm_tension", "RRCi", "wPLI_crossfreq"]
),
"n_peaks": IntParam(5, 1, 10, doc="Number of peaks to extract"),
"f_min": FloatParam(2.0, 0.1, 50.0, doc="Minimum frequency"),
"f_max": FloatParam(30.0, 1.0, 100.0, doc="Maximum frequency"),
"precision": FloatParam(0.1, 0.01, 10.0, doc="Precision of the peak extraction in Hz"),
"peaks_function": StringParam(
"EMD", options=["EMD", "fixed", "harmonic_recurrence", "EIMC"], doc="Peak extraction function"
),
},
"Adjacency": {
"Binarize": BoolParam(False, doc="Binarize the connectivity matrix"),
"threshold": FloatParam(0.5, 0.0, 1.0, doc="Threshold for binarization"),
},
}
def process(self, data: Data):
if data is None:
return None
data.data = np.squeeze(data.data)
if data.data.ndim != 2:
raise ValueError("Data must be 2D")
if self.params["biotuner"]["method"].value != "None":
matrix = compute_conn_matrix_single(
data.data,
data.meta["sfreq"],
peaks_function=self.params["biotuner"]["peaks_function"].value,
min_freq=self.params["biotuner"]["f_min"].value,
max_freq=self.params["biotuner"]["f_max"].value,
precision=self.params["biotuner"]["precision"].value,
n_peaks=self.params["biotuner"]["n_peaks"].value,
metric=self.params["biotuner"]["method"].value,
)
if self.params["biotuner"]["method"].value == "None":
method = self.params["classical"]["method"].value
matrix = compute_classical_connectivity(data.data, method)
binarize = self.params["Adjacency"]["Binarize"].value
threshold = self.params["Adjacency"]["threshold"].value
if binarize:
matrix[matrix < threshold] = 0
matrix[matrix >= threshold] = 1
return {"matrix": (matrix, data.meta)}
connectivity_fn = None
def compute_conn_matrix_single(
data: np.ndarray,
sfreq: float,
peaks_function: str = "EMD",
min_freq: float = 2.0,
max_freq: float = 45.0,
precision=0.1,
n_peaks: int = 5,
metric: str = "harmsim",
):
# import the connectivity function here to avoid loading it on startup
global connectivity_fn
if connectivity_fn is None:
from biotuner.harmonic_connectivity import harmonic_connectivity
connectivity_fn = harmonic_connectivity
# compute connectivity matrix
bt_conn = connectivity_fn(
sf=sfreq,
data=data,
peaks_function=peaks_function,
precision=precision,
min_freq=min_freq,
max_freq=max_freq,
n_peaks=n_peaks,
)
bt_conn.compute_harm_connectivity(metric=metric, save=False, graph=False)
return bt_conn.conn_matrix
hilbert_fn, coherence_fn = None, None
def compute_classical_connectivity(data, method):
# import the connectivity function here to avoid loading it on startup
global hilbert_fn, coherence_fn
if hilbert_fn is None:
from scipy.signal import coherence, hilbert
hilbert_fn = hilbert
coherence_fn = coherence
n_channels, n_samples = data.shape
matrix = np.zeros((n_channels, n_channels))
if method == "covariance":
matrix = np.cov(data)
return matrix
# TODO : optimize this shit
for i in range(n_channels):
for j in range(i, n_channels): # Only compute upper diagonal
if i == j:
matrix[i, j] = 1 # diagonal elements are 1, for coherence/PLV or 0 for others, adjust as needed
continue
if method == "wPLI":
sig1 = hilbert_fn(data[i, :])
sig2 = hilbert_fn(data[j, :])
imag_csd = np.imag(np.exp(1j * (np.angle(sig1) - np.angle(sig2))))
matrix[i, j] = matrix[j, i] = np.abs(np.mean(imag_csd)) / np.mean(np.abs(imag_csd))
elif method == "coherence":
f, Cxy = coherence_fn(data[i, :], data[j, :])
matrix[i, j] = matrix[j, i] = np.mean(Cxy)
elif method == "PLI":
sig1 = hilbert_fn(data[i, :])
sig2 = hilbert_fn(data[j, :])
matrix[i, j] = matrix[j, i] = np.mean(np.sign(np.angle(sig1) - np.angle(sig2)))
elif method == "imag_coherence":
sig1 = hilbert_fn(data[i, :])
sig2 = hilbert_fn(data[j, :])
matrix[i, j] = matrix[j, i] = np.mean(np.imag(np.conj(sig1) * sig2)) / (
np.sqrt(np.mean(np.imag(sig1) ** 2)) * np.sqrt(np.mean(np.imag(sig2) ** 2))
)
elif method == "PLV":
sig1 = hilbert_fn(data[i, :])
sig2 = hilbert_fn(data[j, :])
matrix[i, j] = matrix[j, i] = np.abs(np.mean(np.exp(1j * (np.angle(sig1) - np.angle(sig2)))))
elif method == "pearson":
corr, _ = pearsonr(data[i, :], data[j, :])
matrix[i, j] = matrix[j, i] = corr
elif method == "mutual_info":
mutual_info = mutual_info_regression(data[i, :].reshape(-1, 1), data[j, :])[0]
matrix[i, j] = matrix[j, i] = mutual_info
return matrix
| [] |
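A minimal, self-contained sketch of the PLV computation used in compute_classical_connectivity above, run on two synthetic signals (example data only):

import numpy as np
from scipy.signal import hilbert

t = np.linspace(0, 1, 1000, endpoint=False)
sig_a = np.sin(2 * np.pi * 10 * t)
sig_b = np.sin(2 * np.pi * 10 * t + 0.3)           # constant phase lag between the two channels
phase_diff = np.angle(hilbert(sig_a)) - np.angle(hilbert(sig_b))
plv = np.abs(np.mean(np.exp(1j * phase_diff)))     # close to 1 when the lag is constant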
2024-01-10 | act3-ace/CoRL | test~simulators~test_part_prop_override.py | import gym
from corl.simulators.openai_gym.gym_controllers import OpenAIGymMainController
from corl.simulators.openai_gym.gym_sensors import OpenAiGymStateSensor
from corl.simulators.openai_gym.gym_simulator import OpenAiGymPlatform
def test_override_controller_sensor_prop():
NUM_DISCRETE = 3
base_environment = gym.make("CartPole-v1")
# this is a discrete
base_action_space = base_environment.action_space
base_obs_space = base_environment.observation_space
controller_mod_part = (OpenAIGymMainController, {"properties": {
"n": NUM_DISCRETE
}})
sensor_mod_part = (OpenAiGymStateSensor, {"properties": {
"high": list(base_obs_space.high / 2),
"low": list(base_obs_space.low / 2)
}})
platform_mod_parts = [controller_mod_part, sensor_mod_part]
openai_mod_platform = OpenAiGymPlatform(platform_name="blue0", platform=base_environment, parts_list=platform_mod_parts)
controller_space = list(openai_mod_platform.controllers.values())[0].control_properties.create_space()
sensor_space = list(openai_mod_platform.sensors.values())[0].measurement_properties.create_space()
assert controller_space != base_action_space
assert controller_space == gym.spaces.Discrete(NUM_DISCRETE)
assert sensor_space != base_obs_space
| [] |
2024-01-10 | act3-ace/CoRL | corl~simulators~openai_gym~gym_sensors.py | """
---------------------------------------------------------------------------
Air Force Research Laboratory (AFRL) Autonomous Capabilities Team (ACT3)
Reinforcement Learning (RL) Core.
This is a US Government Work not subject to copyright protection in the US.
The use, dissemination or disclosure of data in this file is subject to
limitation or restriction. See accompanying README and LICENSE for details.
---------------------------------------------------------------------------
Sensors for OpenAIGymSimulator
"""
import typing
import numpy as np
from numpy_ringbuffer import RingBuffer
from corl.libraries.plugin_library import PluginLibrary
from corl.libraries.property import BoxProp, RepeatedProp
from corl.simulators.base_parts import BaseSensor
from corl.simulators.openai_gym.gym_available_platforms import OpenAIGymAvailablePlatformTypes
from corl.simulators.openai_gym.gym_simulator import OpenAIGymSimulator
class OpenAiGymStateSensor(BaseSensor):
"""
Sensor that reports the observation of a given platform gym environment
"""
def __init__(self, parent_platform, config=None):
obs_space = parent_platform.observation_space
class GymSensorProp(BoxProp):
"""
GymSensorProp can be updated via config and validated by pydantic
"""
name: str = "GymStateSensor"
low: typing.List[float] = obs_space.low.tolist()
high: typing.List[float] = obs_space.high.tolist()
unit: typing.List[str] = ["None"] * len(obs_space.low)
description: str = "Gym Space"
super().__init__(parent_platform=parent_platform, config=config, property_class=GymSensorProp)
@property
def exclusiveness(self) -> typing.Set[str]:
"""Return exclusiveness"""
return {"state_sensor"}
def _calculate_measurement(self, state):
return state.obs[self.parent_platform.name]
PluginLibrary.AddClassToGroup(
OpenAiGymStateSensor, "Sensor_State", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
class OpenAiGymRepeatedStateSensor(BaseSensor):
"""
Sensor that reports the observation of a given platform gym environment
"""
def __init__(self, parent_platform, config=None):
obs_space = parent_platform.observation_space
class GymSensorProp(BoxProp):
"""
GymSensorProp can be updated via config and validated by pydantic
"""
name: str = "GymStateSensor"
low: typing.List[float] = obs_space.low.tolist()
high: typing.List[float] = obs_space.high.tolist()
unit: typing.List[str] = ["None"] * len(obs_space.low)
description: str = "Gym Space"
class GymSensorRepeatedProp(RepeatedProp):
"""
GymSensorRepeatedProp can be updated via config and validated by pydantic
"""
name: str = "GymStateRepeatedSensor"
max_len: int = 10
child_space: typing.Dict[str, GymSensorProp] = {"GymState": GymSensorProp()} # type: ignore
description: str = "Gym Space Repeated"
super().__init__(config=config, parent_platform=parent_platform, property_class=GymSensorRepeatedProp)
self.obs_buffer = RingBuffer(capacity=self.measurement_properties.max_len, dtype=np.ndarray)
@property
def exclusiveness(self) -> typing.Set[str]:
"""Return exclusiveness"""
return {"state_sensor"}
def _calculate_measurement(self, state):
self.obs_buffer.append(state.obs[self.parent_platform.name])
ret = map(lambda x: {"GymState": x}, self.obs_buffer)
return list(ret)
PluginLibrary.AddClassToGroup(
OpenAiGymRepeatedStateSensor,
"Sensor_RepeatedState", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
| [] |
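A quick illustration of the numpy_ringbuffer behaviour the repeated sensor above relies on: the buffer keeps only the most recent max_len measurements (capacity and values here are illustrative).

import numpy as np
from numpy_ringbuffer import RingBuffer

buf = RingBuffer(capacity=3, dtype=np.ndarray)
for i in range(5):
    buf.append(np.array([i]))
print(list(buf))  # only the last 3 appends survive: [array([2]), array([3]), array([4])]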
2024-01-10 | act3-ace/CoRL | corl~simulators~openai_gym~gym_controllers.py | """
---------------------------------------------------------------------------
Air Force Research Laboratory (AFRL) Autonomous Capabilities Team (ACT3)
Reinforcement Learning (RL) Core.
This is a US Government Work not subject to copyright protection in the US.
The use, dissemination or disclosure of data in this file is subject to
limitation or restriction. See accompanying README and LICENSE for details.
---------------------------------------------------------------------------
Provides controllers to pass actions to wrapped OpenAI Gym Environments
"""
import numbers
import typing
import gym
import numpy as np
from corl.libraries.plugin_library import PluginLibrary
from corl.libraries.property import BoxProp, DiscreteProp
from corl.simulators.base_parts import BaseController
from corl.simulators.openai_gym.gym_available_platforms import OpenAIGymAvailablePlatformTypes
from corl.simulators.openai_gym.gym_simulator import OpenAIGymSimulator
class OpenAIGymMainController(BaseController):
"""
GymController implementation for passing actions to wrapped OpenAI Gym Environments
"""
def __init__(self, parent_platform, config=None):
act_space = parent_platform.action_space
cont_prop: typing.Union[DiscreteProp, BoxProp]
if isinstance(act_space, gym.spaces.Discrete):
class DiscreteGymProp(DiscreteProp):
"""
DiscreteGymProp can be updated via config and validated by pydantic
"""
name: str = "gym controller"
unit: str = "None"
n: int = act_space.n
description: str = "gym env action space"
cont_prop = DiscreteGymProp
elif isinstance(act_space, gym.spaces.Box):
class BoxGymProp(BoxProp):
"""
BoxGymProp can be updated via config and validated by pydantic
"""
name: str = "gym controller"
low: typing.List[float] = act_space.low.tolist()
high: typing.List[float] = act_space.high.tolist()
dtype: np.dtype = act_space.dtype
unit: typing.List[str] = ["None"] * len(act_space.low)
description: str = "gym env action space"
cont_prop = BoxGymProp
else:
raise RuntimeError(f"This controller does not currently know how to handle a {type(act_space)} action space")
super().__init__(parent_platform=parent_platform, config=config, property_class=cont_prop)
@property
def exclusiveness(self) -> typing.Set[str]:
"""Return exclusiveness"""
return {"main_controller"}
def apply_control(self, control: np.ndarray) -> None:
self.parent_platform.save_action_to_platform(control)
def get_applied_control(self) -> typing.Union[np.ndarray, numbers.Number]:
return self.parent_platform.get_applied_action()
PluginLibrary.AddClassToGroup(
OpenAIGymMainController, "Controller_Gym", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
class OpenAIGymDuplicateController(OpenAIGymMainController):
"""
GymController implementation for passing actions to wrapped OpenAI Gym Environments
"""
@property
def exclusiveness(self) -> typing.Set[str]:
"""Return exclusiveness"""
return set()
PluginLibrary.AddClassToGroup(
OpenAIGymDuplicateController,
"Controller_Duplicate_Gym", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
| [] |
2024-01-10 | act3-ace/CoRL | test~glues~test_observe_sensor.py | """
---------------------------------------------------------------------------
Air Force Research Laboratory (AFRL) Autonomous Capabilities Team (ACT3)
Reinforcement Learning (RL) Core.
This is a US Government Work not subject to copyright protection in the US.
The use, dissemination or disclosure of data in this file is subject to
limitation or restriction. See accompanying README and LICENSE for details.
---------------------------------------------------------------------------
"""
import gym
import numpy as np
from pydantic import ValidationError
import pytest
import typing
import math
from corl.glues.common.observe_sensor import ObserveSensor
from corl.libraries.property import BoxProp
from corl.simulators.base_parts import BaseSensor
from corl.simulators.base_platform import BasePlatform
from corl.libraries.plugin_library import PluginLibrary
from corl.simulators.openai_gym.gym_simulator import OpenAIGymSimulator
from corl.simulators.openai_gym.gym_available_platforms import OpenAIGymAvailablePlatformTypes
class TestSensor(BaseSensor):
def __init__(self, parent_platform, config):
super().__init__(parent_platform=parent_platform, config=config, property_class=config['property_class'])
def _calculate_measurement(self, state):
return np.array([1.0, 1.0]).astype(np.float32)
PluginLibrary.AddClassToGroup(
TestSensor, "Sensor_Test", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
class TestSensor2D(BaseSensor):
def __init__(self, parent_platform, config):
super().__init__(parent_platform=parent_platform, config=config, property_class=config['property_class'])
def _calculate_measurement(self, state):
return np.array([[1.0, 1.0], [1.5, 1.5]]).astype(np.float32)
PluginLibrary.AddClassToGroup(
TestSensor2D, "Sensor_Test2D", {
"simulator": OpenAIGymSimulator, "platform_type": OpenAIGymAvailablePlatformTypes.MAIN
}
)
def build_observe_sensor(sensorname, sensorclass, propconfig, output_units=None):
sensorconfig = {
"parent_platform": "none",
"config": {},
"property_class": BoxProp,
"properties": propconfig
}
platformconfig = {
"platform_name": "blue0",
"platform": "none",
"parts_list": [(sensorclass, sensorconfig)]
}
class TestPlatform(BasePlatform):
def __init__(self, platform_name, platform, parts_list):
super().__init__(platform_name=platform_name, platform=platform, parts_list=parts_list)
@property
def operable(self):
return True
observesensorconfig = {
"name": "ObserveSensor",
"sensor": sensorname,
"normalization": {"enabled": False},
"agent_name": "blue0",
"platform": TestPlatform(**platformconfig),
"output_units": output_units
}
return ObserveSensor(**observesensorconfig)
# No output units specified, sensor has default units
def test_observe_sensor_no_unit_default():
propconfig = {
"name": "TestProp",
"low": [1.0, 1.0],
"high": [2.0, 2.0],
"unit": ["meter", "radian"],
"description": "Test Space"
}
observe_sensor = build_observe_sensor("Sensor_Test", TestSensor, propconfig)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == ["m", "rad"]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([1.0, 1.0]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([2.0, 2.0]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation, np.array([1.0, 1.0]).astype(np.float32))
# No output units specified, sensor does not have default units
def test_observe_sensor_no_unit_not_default():
propconfig = {
"name": "TestProp",
"low": [1.0, 1.0],
"high": [2.0, 2.0],
"unit": ["feet", "degree"],
"description": "Test Space"
}
observe_sensor = build_observe_sensor("Sensor_Test", TestSensor, propconfig)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == ["m", "rad"]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([1.0 / 3.28084, 1.0 * math.pi / 180.0]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([2.0 / 3.28084, 2.0 * math.pi / 180.0]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation, np.array([1.0 / 3.28084, 1.0 * math.pi / 180.0]).astype(np.float32))
# Output units specified, sensor has default units
def test_observe_sensor_unit_default():
propconfig = {
"name": "TestProp",
"low": [1.0, 1.0],
"high": [2.0, 2.0],
"unit": ["meter", "radian"],
"description": "Test Space"
}
output_units = ["feet", "degree"]
observe_sensor = build_observe_sensor("Sensor_Test", TestSensor, propconfig, output_units)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == ["ft", "deg"]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([3.28084, 180.0 / math.pi]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([2.0 * 3.28084, 2.0 * 180.0 / math.pi]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation, np.array([3.28084, 180.0 / math.pi]).astype(np.float32))
# Output units specified, sensor does not have default units
def test_observe_sensor_unit_not_default():
propconfig = {
"name": "TestProp",
"low": [1.0, 1.0],
"high": [2.0, 2.0],
"unit": ["feet", "degree"],
"description": "Test Space"
}
output_units = ["nm", "degree"]
observe_sensor = build_observe_sensor("Sensor_Test", TestSensor, propconfig, output_units)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == ["nm", "deg"]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([1.0 / 1852 / 3.28084, 1.0]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([2.0 / 1852 / 3.28084, 2.0]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation, np.array([1.0 / 1852 / 3.28084, 1.0]).astype(np.float32))
# Output units specified, sensor does not have default units, 2D case
def test_observe_sensor_unit_not_default_2d():
propconfig = {
"name": "TestProp",
"low": [[1.0, 1.0], [1.5, 1.5]],
"high": [[2.0, 2.0], [2.5, 2.5]],
"unit": [["feet", "degree"], ["feet", "degree"]],
"description": "Test Space"
}
output_units = [["nm", "rad"], ["nm", "rad"]]
observe_sensor = build_observe_sensor("Sensor_Test2D", TestSensor2D, propconfig, output_units)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == [["nm", "rad"], ["nm", "rad"]]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([[1.0 / 1852 / 3.28084, 1.0 * math.pi / 180.0], [1.5 / 1852 / 3.28084, 1.5 * math.pi / 180.0]]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([[2.0 / 1852 / 3.28084, 2.0 * math.pi / 180.0], [2.5 / 1852 / 3.28084, 2.5 * math.pi / 180.0]]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
expected_observation = np.array([[1.0 / 1852 / 3.28084, 1.0 * math.pi / 180.0], [1.5 / 1852 / 3.28084, 1.5 * math.pi / 180.0]]).astype(np.float32)
np.testing.assert_array_equal(observation, expected_observation)
# No output units specified, sensor does not have default units, 2D case
def test_observe_sensor_no_unit_not_default_2d():
propconfig = {
"name": "TestProp",
"low": [[1.0, 1.0], [1.5, 1.5]],
"high": [[2.0, 2.0], [2.5, 2.5]],
"unit": [["feet", "degree"], ["feet", "degree"]],
"description": "Test Space"
}
observe_sensor = build_observe_sensor("Sensor_Test2D", TestSensor2D, propconfig)
# Check observation_units
observation_units = observe_sensor.observation_units()[observe_sensor.Fields.DIRECT_OBSERVATION]
assert observation_units == [["m", "rad"], ["m", "rad"]]
# Check observation_space
observation_space = observe_sensor.observation_space()[observe_sensor.Fields.DIRECT_OBSERVATION]
np.testing.assert_array_equal(observation_space.low, np.array([[1.0 / 3.28084, 1.0 * math.pi / 180.0], [1.5 / 3.28084, 1.5 * math.pi / 180.0]]).astype(np.float32))
np.testing.assert_array_equal(observation_space.high, np.array([[2.0 / 3.28084, 2.0 * math.pi / 180.0], [2.5 / 3.28084, 2.5 * math.pi / 180.0]]).astype(np.float32))
# Check get_observation
observe_sensor._sensor.calculate_and_cache_measurement(None)
observation = observe_sensor.get_observation({}, {}, {})[observe_sensor.Fields.DIRECT_OBSERVATION]
expected_observation = np.array([[1.0 / 3.28084, 1.0 * math.pi / 180.0], [1.5 / 3.28084, 1.5 * math.pi / 180.0]]).astype(np.float32)
np.testing.assert_array_equal(observation, expected_observation)
| [] |
2024-01-10 | act3-ace/CoRL | corl~simulators~openai_gym~gym_available_platforms.py | """
---------------------------------------------------------------------------
Air Force Research Laboratory (AFRL) Autonomous Capabilities Team (ACT3)
Reinforcement Learning (RL) Core.
This is a US Government Work not subject to copyright protection in the US.
The use, dissemination or disclosure of data in this file is subject to
limitation or restriction. See accompanying README and LICENSE for details.
---------------------------------------------------------------------------
Provides AvailablePlatformTypes for the Open AI Gym Simulator
"""
from __future__ import annotations
from corl.libraries.plugin_library import PluginLibrary
from corl.simulators.base_available_platforms import BaseAvailablePlatformTypes
from corl.simulators.openai_gym.gym_simulator import OpenAIGymSimulator
class OpenAIGymAvailablePlatformTypes(BaseAvailablePlatformTypes):
"""Enumeration that outlines the platform types that have been implemented
"""
MAIN = (1, )
@classmethod
def ParseFromNameModel(cls, config: dict) -> OpenAIGymAvailablePlatformTypes:
"""
This just returns the main platform type, as openai gym simulators are simple
"""
return OpenAIGymAvailablePlatformTypes.MAIN
PluginLibrary.AddClassToGroup(OpenAIGymAvailablePlatformTypes, "OpenAIGymSimulator_Platforms", {"simulator": OpenAIGymSimulator})
| [] |
2024-01-10 | Jonathan-Henriksen/NeuralJourney | scripts~finetunelist.py | import openai
print("")
for finetuning in openai.FineTuningJob.list().data:
print("___________________________________________________________________\n")
print(f'Model name: {finetuning.fine_tuned_model}')
print(f'File name: {openai.File.retrieve(finetuning.training_file).filename}\n')
print("___________________________________________________________________") | [] |
2024-01-10 | DrDavidL/auto_analyze | main_docker.py | import numpy as np
# import langchain
import pandas as pd
import missingno as msno
import io
import sys
from ydata_profiling import ProfileReport
import streamlit as st
from streamlit_pandas_profiling import st_profile_report
import streamlit.components.v1 as components
import numpy as np
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.imputation import mice
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler, normalize
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score, average_precision_score, precision_recall_curve, auc, f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.impute import SimpleImputer
from sklearn import svm
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.llms import OpenAI
import json
import base64
import plotly.io as pio
from bs4 import BeautifulSoup
from PIL import Image
from scipy import stats
import lifelines
from lifelines import KaplanMeierFitter, CoxPHFitter
from prompts import *
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import openai
from openai import OpenAI
from tableone import TableOne
from scipy import stats
from streamlit_chat import message
import random
from random import randint
import os
from sklearn import linear_model
import statsmodels.api as sm
import category_encoders as ce
from mpl_toolkits.mplot3d import Axes3D
import shap
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
st.set_page_config(page_title='AutoAnalyzer', layout = 'centered', page_icon = ':chart_with_upwards_trend:', initial_sidebar_state = 'auto')
# if st.button('Click to toggle sidebar state'):
# st.session_state.sidebar_state = 'collapsed' if st.session_state.sidebar_state == 'expanded' else 'expanded'
# # Force an app rerun after switching the sidebar state.
# st.experimental_rerun()
# # Initialize a session state variable that tracks the sidebar state (either 'expanded' or 'collapsed').
if 'last_response' not in st.session_state:
st.session_state.last_response = ''
if 'df' not in st.session_state:
st.session_state.df = pd.DataFrame()
if 'modified_df' not in st.session_state:
st.session_state.modified_df = pd.DataFrame()
if "openai_api_key" not in st.session_state:
st.session_state.openai_api_key = ''
if "gen_csv" not in st.session_state:
st.session_state.gen_csv = None
if "df_to_download" not in st.session_state:
st.session_state.df_to_download = None
@st.cache_data
def is_valid_api_key(api_key):
openai.api_key = api_key
try:
# Send a test request to the OpenAI API
response = openai.Completion.create(model="text-davinci-003",
prompt="Hello world")['choices'][0]['text']
return True
except Exception:
pass
return False
def is_bytes_like(obj):
return isinstance(obj, (bytes, bytearray, memoryview))
def save_image(plot, filename):
if is_bytes_like(plot):
img = io.BytesIO(plot)
else:
img = io.BytesIO()
plot.savefig(img, format='png')
btn = st.download_button(
label="Download your plot.",
data = img,
file_name=filename,
mime='image/png',
)
@st.cache_data
def generate_regression_equation(intercept, coef, x_col):
equation = f"y = {round(intercept,4)}"
for c, feature in zip(coef, x_col):
equation += f" + {round(c,4)} * {feature}"
return equation
def df_download_options(df, report_type, format):
file_name = f'{report_type}.{format}'
if format == 'csv':
data = df.to_csv(index=True)
mime = 'text/csv'
if format == 'json':
data = df.to_json(orient='records')
mime = 'application/json'
if format == 'html':
data = df.to_html()
mime = 'text/html'
if True:
st.download_button(
label="Download your report.",
data=data,
# data=df.to_csv(index=True),
file_name=file_name,
mime=mime,
)
@st.cache_data
def plot_mult_linear_reg(df, x, y):
# with sklearn
regr = linear_model.LinearRegression()
regr.fit(x, y)
# st.write('Intercept: \n', regr.intercept_)
# st.write('Coefficients: \n', regr.coef_)
# with statsmodels
x = sm.add_constant(x) # adding a constant
model = sm.OLS(y, x).fit()
predictions = model.predict(x)
print_model = model.summary2()
st.write(print_model)
try:
df_mlr_output = print_model.tables[1]
except:
st.write("couldn't generate dataframe version")
return print_model, df_mlr_output, regr.intercept_, regr.coef_
@st.cache_data
def all_categorical(df):
categ_cols = df.select_dtypes(include=['object']).columns.tolist()
numeric_cols = [col for col in df.columns if df[col].nunique() == 2 and df[col].dtype != 'object']
filtered_categorical_cols = [col for col in categ_cols if df[col].nunique() <= 15]
all_categ = filtered_categorical_cols + numeric_cols
return all_categ
@st.cache_data
def all_numerical(df):
numerical_cols = df.select_dtypes(include='number').columns.tolist()
for col in df.select_dtypes(include='object').columns:
if df[col].nunique() == 2:
unique_values = df[col].unique()
if 0 in unique_values and 1 in unique_values:
continue
value_counts = df[col].value_counts()
most_frequent_value = value_counts.idxmax()
least_frequent_value = value_counts.idxmin()
if most_frequent_value != 0 and least_frequent_value != 1:
df[col] = np.where(df[col] == most_frequent_value, 0, 1)
st.write(f"Replaced most frequent value '{most_frequent_value}' with 0 and least frequent value '{least_frequent_value}' with 1 in column '{col}'.")
numerical_cols.append(col) # Update numerical_cols
return numerical_cols
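# Hedged worked example (hypothetical data) of the recoding performed by all_numerical above:
# df = pd.DataFrame({'sex': ['M', 'M', 'F'], 'age': [34, 41, 29]})
# all_numerical(df)      # -> ['age', 'sex']
# df['sex'].tolist()     # -> [0, 0, 1]; most frequent 'M' mapped to 0, 'F' mapped to 1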
def filter_dataframe(df):
# Get the column names and data types of the dataframe
columns = df.columns
dtypes = df.dtypes
# Create a sidebar for selecting columns to exclude
excluded_columns = st.multiselect("Exclude Columns", columns)
# Create a copy of the dataframe to apply the filters
filtered_df = df.copy()
# Exclude the selected columns from the dataframe
filtered_df = filtered_df.drop(excluded_columns, axis=1)
# Get the column names and data types of the filtered dataframe
filtered_columns = filtered_df.columns
filtered_dtypes = filtered_df.dtypes
# Create a sidebar for selecting numerical variables and their range
numerical_columns = [col for col, dtype in zip(filtered_columns, filtered_dtypes) if dtype in ['int64', 'float64']]
for col in numerical_columns:
min_val = filtered_df[col].min()
max_val = filtered_df[col].max()
st.write(f"**{col}**")
min_range, max_range = st.slider("", min_val, max_val, (min_val, max_val), key=col)
# Filter the dataframe based on the selected range
if min_range > min_val or max_range < max_val:
filtered_df = filtered_df[(filtered_df[col] >= min_range) & (filtered_df[col] <= max_range)]
# Create a sidebar for selecting categorical variables and their values
categorical_columns = [col for col, dtype in zip(filtered_columns, filtered_dtypes) if dtype == 'object']
for col in categorical_columns:
unique_values = filtered_df[col].unique()
selected_values = st.multiselect(col, unique_values, unique_values)
# Filter the dataframe based on the selected values
if len(selected_values) < len(unique_values):
filtered_df = filtered_df[filtered_df[col].isin(selected_values)]
return filtered_df
# Function to generate a download link
@st.cache_data
def get_download_link(file_path, file_type):
with open(file_path, "rb") as file:
contents = file.read()
base64_data = base64.b64encode(contents).decode("utf-8")
download_link = f'<a href="data:application/octet-stream;base64,{base64_data}" download="tableone_results.{file_type}">Click here to download the TableOne results in {file_type} format.</a>'
return download_link
@st.cache_data
def find_binary_categorical_variables(df):
binary_categorical_vars = []
for col in df.columns:
unique_values = df[col].unique()
if len(unique_values) == 2:
binary_categorical_vars.append(col)
return binary_categorical_vars
@st.cache_data
def calculate_odds_older(table):
odds_cases = table.iloc[1, 1] / table.iloc[1, 0]
odds_controls = table.iloc[0, 1] / table.iloc[0, 0]
odds_ratio = odds_cases / odds_controls
return odds_cases, odds_controls, odds_ratio
@st.cache_data
def calculate_odds(table):
odds_cases = table.iloc[1, 1] / table.iloc[1, 0]
odds_controls = table.iloc[0, 1] / table.iloc[0, 0]
odds_ratio = odds_cases / odds_controls
return odds_cases, odds_controls, odds_ratio
@st.cache_data
def generate_2x2_table(df, var1, var2):
table = pd.crosstab(df[var1], df[var2], margins=True)
table.columns = ['No ' + var2, 'Yes ' + var2, 'Total']
table.index = ['No ' + var1, 'Yes ' + var1, 'Total']
return table
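# Hedged worked example for calculate_odds and generate_2x2_table above (illustrative counts only).
# A 2x2 table with exposure rows and outcome columns:
#                 No outcome   Yes outcome
#   No exposure           40            10
#   Yes exposure          20            30
# calculate_odds then returns:
#   odds_cases    = 30 / 20 = 1.5
#   odds_controls = 10 / 40 = 0.25
#   odds_ratio    = 1.5 / 0.25 = 6.0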
@st.cache_data
def plot_survival_curve(df, time_col, event_col):
# Create a Kaplan-Meier fitter object
try:
kmf = KaplanMeierFitter()
# Fit the survival curve using the dataframe
kmf.fit(df[time_col], event_observed=df[event_col])
# Plot the survival curve
fig, ax = plt.subplots()
kmf.plot(ax=ax)
# Add labels and title to the plot
ax.set_xlabel('Time')
ax.set_ylabel('Survival Probability')
ax.set_title('Survival Curve')
# Display the plot
st.pyplot(fig)
return fig
except TypeError:
st.warning("Find the right columns for time and event.")
@st.cache_data
def calculate_rr_arr_nnt(tn, fp, fn, tp):
rr = (tp / (tp + fn)) / (fp / (fp + tn)) if fp + tn > 0 and tp + fn > 0 else np.inf
arr = (fn / (fn + tp)) - (fp / (fp + tn)) if fn + tp > 0 and fp + tn > 0 else np.inf
nnt = 1 / arr if arr > 0 else np.inf
return rr, arr, nnt
def fetch_api_key():
# Try to get the API key from an environment variable
api_key = os.getenv("OPENAI_API_KEY")
# If the API key is found in the environment variables, return it
if api_key:
return api_key
# If the API key is not found, check if it's already in the session state
if 'openai_api_key' in st.session_state and st.session_state.openai_api_key:
return st.session_state.openai_api_key
# If the API key is not in the environment variables or session state, prompt the user
st.sidebar.warning("Please enter your API key.")
api_key = st.sidebar.text_input("API Key:", key='api_key_input')
# If the user provides the API key, store it in the session state and return it
if api_key:
st.session_state.openai_api_key = api_key
return api_key
else:
# If no API key is provided, display an error
st.error("API key is required to proceed.")
return None
def check_password():
return True
# """Returns `True` if the user had the correct password."""
# def password_entered():
# """Checks whether a password entered by the user is correct."""
# if st.session_state["password"] == os.getenv("password"):
# st.session_state["password_correct"] = True
# del st.session_state["password"] # don't store password
# else:
# st.session_state["password_correct"] = False
# if "password_correct" not in st.session_state:
# # First run, show input for password.
# st.text_input(
# "GPT features require a password.", type="password", on_change=password_entered, key="password"
# )
# st.warning("*Please contact David Liebovitz, MD if you need an updated password for access.*")
# return False
# elif not st.session_state["password_correct"]:
# # Password not correct, show input + error.
# st.text_input(
# "GPT features require a password.", type="password", on_change=password_entered, key="password"
# )
# st.error("😕 Password incorrect")
# return False
# else:
# # Password correct.
# # fetch_api_key()
# return True
@st.cache_data
def assess_data_readiness(df):
readiness_summary = {}
st.write('White horizontal lines (if present) show missing data')
try:
missing_matrix = msno.matrix(df)
# st.write('line 2 of assess_data_readiness')
st.pyplot(missing_matrix.figure)
# st.write('line 3 of assess_data_readiness')
missing_heatmap = msno.heatmap(df)
st.write('Heatmap with convergence of missing elements (if any)')
st.pyplot(missing_heatmap.figure)
    except Exception:
        st.warning('Dataframe not yet amenable to missing-data analysis with the "missingno" library.')
# Check if the DataFrame is empty
try:
if df.empty:
readiness_summary['data_empty'] = True
readiness_summary['columns'] = {}
readiness_summary['missing_columns'] = []
readiness_summary['inconsistent_data_types'] = []
readiness_summary['missing_values'] = {}
readiness_summary['data_ready'] = False
return readiness_summary
except:
st.warning('Dataframe not yet amenable to empty analysis.')
# Get column information
# st.write('second line of assess_data_readiness')
try:
columns = {col: str(df[col].dtype) for col in df.columns}
readiness_summary['columns'] = columns
except:
st.warning('Dataframe not yet amenable to column analysis.')
# Check for missing columns
# st.write('third line of assess_data_readiness')
try:
missing_columns = df.columns[df.isnull().all()].tolist()
readiness_summary['missing_columns'] = missing_columns
except:
st.warning('Dataframe not yet amenable to missing column analysis.')
# Check for inconsistent data types
# st.write('fourth line of assess_data_readiness')
try:
inconsistent_data_types = []
for col in df.columns:
unique_data_types = df[col].apply(type).drop_duplicates().tolist()
if len(unique_data_types) > 1:
inconsistent_data_types.append(col)
readiness_summary['inconsistent_data_types'] = inconsistent_data_types
except:
st.warning('Dataframe not yet amenable to data type analysis.')
# Check for missing values
# st.write('fifth line of assess_data_readiness')
try:
missing_values = df.isnull().sum().to_dict()
readiness_summary['missing_values'] = missing_values
except:
st.warning('Dataframe not yet amenable to specific missing value analysis.')
# Determine overall data readiness
# st.write('sixth line of assess_data_readiness')
try:
readiness_summary['data_empty'] = False
if missing_columns or inconsistent_data_types or any(missing_values.values()):
readiness_summary['data_ready'] = False
else:
readiness_summary['data_ready'] = True
return readiness_summary
except:
st.warning('Dataframe not yet amenable to overall data readiness analysis.')
@st.cache_data
def process_model_output(output):
    # If the agent returned a dict (e.g., a function-call payload), prefer its 'arguments' field
    if isinstance(output, dict) and 'arguments' in output:
        output = output['arguments']
    # Ensure we are working with a plain string before searching for the code markers
    if not isinstance(output, str):
        output = json.dumps(output)
start_marker = '```python\n'
end_marker = '\n```'
start_index = output.find(start_marker)
end_index = output.find(end_marker, start_index)
# If the markers are found, extract the code part
# Adjust the start index to not include the start_marker
if start_index != -1 and end_index != -1:
code_string = output[start_index + len(start_marker) : end_index]
else:
code_string = ''
return code_string.strip()
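# Illustrative sketch of the agent output this parser expects (hypothetical response text):
#   "Here is your chart:\n```python\nimport matplotlib.pyplot as plt\nplt.hist(df['Age'])\nplt.show()\n```"
# process_model_output would return only the code between the markers:
#   "import matplotlib.pyplot as plt\nplt.hist(df['Age'])\nplt.show()"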
@st.cache_data
def safety_check(code):
dangerous_keywords = [' exec', ' eval', ' open', ' sys', ' subprocess', ' del',
' delete', ' remove', ' os', ' shutil', ' pip',' conda',
' st.write', ' exit', ' quit', ' globals', ' locals', ' dir',
' reload', ' lambda', ' setattr', ' getattr', ' delattr',
' yield', ' assert', ' break', ' continue', ' raise', ' try',
'compile', '__import__'
]
for keyword in dangerous_keywords:
if keyword in code:
return False, "Concerning code detected."
return True, "Safe to execute."
def replace_show_with_save(code_string, filename='output.png'):
# Prepare save command
save_cmd1 = f"plt.savefig('./images/{filename}')"
save_cmd2 = f"pio.write_image(fig, './images/{filename}')"
# Replace plt.show() with plt.savefig()
code_string = code_string.replace('plt.show()', save_cmd1)
code_string = code_string.replace('fig.show()', save_cmd2)
return code_string
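# Example of what this rewrite does: a generated snippet ending in "plt.show()" becomes
# "plt.savefig('./images/output.png')" (and "fig.show()" becomes a pio.write_image call),
# so the figure is written to disk and can then be reloaded and displayed with st.image.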
def start_chatbot2(df, selected_model, key = "main routine"):
fetch_api_key()
openai.api_key = st.session_state.openai_api_key
openai_api_key = st.session_state.openai_api_key
agent = create_pandas_dataframe_agent(
ChatOpenAI(temperature=0, model=selected_model),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
if "messages_df" not in st.session_state:
st.session_state["messages_df"] = []
st.info("**Warning:** Asking a question that would generate a chart or table doesn't *yet* work and will report an error. For the moment, just ask for values. This is a work in progress!")
# st.write("💬 Chatbot with access to your data...")
# Check if the API key exists as an environmental variable
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
# st.write("*API key active - ready to respond!*")
pass
else:
st.warning("API key not found as an environmental variable.")
api_key = st.text_input("Enter your OpenAI API key:")
if st.button("Save"):
if is_valid_api_key(api_key):
os.environ["OPENAI_API_KEY"] = api_key
st.success("API key saved as an environmental variable!")
else:
st.error("Invalid API key. Please enter a valid API key.")
csv_question = st.text_input("Your question, e.g., 'What is the mean age for men with diabetes?' *Do not ask for plots for this option.*", "")
if st.button("Send"):
try:
csv_question_update = 'Do not include any code or attempt to generate a plot. Indicate you can only respond with text. User question: ' + csv_question
st.session_state.messages_df.append({"role": "user", "content": csv_question_update})
            output = agent.run(csv_question_update)
# if True:
# st.session_state.modified_df = df
st.session_state.messages_df.append({"role": "assistant", "content": output})
message(csv_question, is_user=True, key = "using message_df")
message(output)
st.session_state.modified_df = df
# chat_modified_csv = df.to_csv(index=False)
st.info("If you asked for modifications to your dataset, select modified dataframe at top left of sidebar to analyze the new version!")
# st.download_button(
# label="Download Modified Data!",
# data=chat_modified_csv,
# file_name="patient_data_modified.csv",
# mime="text/csv", key = 'modified_df'
# )
except Exception as e:
st.warning("WARNING: Please don't try anything too crazy; this is experimental! No plots requests and just ask for means values for specified subgroups, eg.")
st.write(f'Error: {e}')
# sys.exit(1)
def start_chatbot3(df, model):
fetch_api_key()
openai.api_key = st.session_state.openai_api_key
agent = create_pandas_dataframe_agent(
# ChatOpenAI(temperature=0, model="gpt-3.5-turbo"),
ChatOpenAI(temperature=0, model=model),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
if "messages_df" not in st.session_state:
st.session_state["messages_df"] = []
# st.write("💬 Chatbot with access to your data...")
st.info("""**Warning:** This may generate an error. This is a work in progress!
If you get an error, try again.
""")
# Check if the API key exists as an environmental variable
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
# st.write("*API key active - ready to respond!*")
pass
else:
st.warning("API key not found as an environmental variable.")
api_key = st.text_input("Enter your OpenAI API key:")
if st.button("Save"):
if is_valid_api_key(api_key):
os.environ["OPENAI_API_KEY"] = api_key
st.success("API key saved as an environmental variable!")
else:
st.error("Invalid API key. Please enter a valid API key.")
csv_question = st.text_input("Your question, e.g., 'Create a scatterplot for age and BMI.' *This option only generates plots.* ", "")
if st.button("Send"):
try:
st.session_state.messages_df.append({"role": "user", "content": csv_question})
csv_input = csv_prefix + csv_question
output = agent.run(csv_input)
# st.write(output)
code_string = process_model_output(str(output))
# st.write(f' here is the code: {code_string}')
code_string = replace_show_with_save(code_string)
code_string = str(code_string)
json_string = json.dumps(code_string)
decoded_string = json.loads(json_string)
with st.expander("What is the code?"):
st.write('Here is the custom code for your request and the image below:')
st.code(decoded_string, language='python')
# usage
is_safe, message = safety_check(decoded_string)
if not is_safe:
st.write("Code safety concern. Try again.", message)
if is_safe:
try:
exec(decoded_string)
image = Image.open('./images/output.png')
st.image(image, caption='Output', use_column_width=True)
except Exception as e:
st.write('Error - we noted this was fragile! Try again.', e)
except Exception as e:
st.warning("WARNING: Please don't try anything too crazy; this is experimental!")
# sys.exit(1)
# return None, None
def start_plot_gpt4(df):
fetch_api_key()
openai.api_key = st.session_state.openai_api_key
agent = create_pandas_dataframe_agent(
ChatOpenAI(temperature=0, model="gpt-4"),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
if "messages_df" not in st.session_state:
st.session_state["messages_df"] = []
# st.write("💬 Chatbot with access to your data...")
st.info("""**Warning:** This may generate an error. This is a work in progress!
If you get an error, try again.
""")
# Check if the API key exists as an environmental variable
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
# st.write("*API key active - ready to respond!*")
pass
else:
st.warning("API key not found as an environmental variable.")
api_key = st.text_input("Enter your OpenAI API key:")
if st.button("Save"):
if is_valid_api_key(api_key):
os.environ["OPENAI_API_KEY"] = api_key
st.success("API key saved as an environmental variable!")
else:
st.error("Invalid API key. Please enter a valid API key.")
    csv_question = st.text_area("Your question, e.g., 'Create a heatmap. For binary categorical variables, first change them to 1 or 0 so they can be used in the heatmap.' Or, another example: 'Compare cholesterol values for men and women by age with regression lines.'", "")
if st.button("Send"):
try:
st.session_state.messages_df.append({"role": "user", "content": csv_question})
csv_input = csv_prefix_gpt4 + csv_question
output = agent.run(csv_input)
# st.write(output)
code_string = process_model_output(str(output))
# st.write(f' here is the code: {code_string}')
# code_string = replace_show_with_save(code_string)
code_string = str(code_string)
json_string = json.dumps(code_string)
decoded_string = json.loads(json_string)
with st.expander("What is the code?"):
st.write('Here is the custom code for your request and the image below:')
st.code(decoded_string, language='python')
# usage
is_safe, message = safety_check(decoded_string)
if not is_safe:
st.write("Code safety concern. Try again.", message)
if is_safe:
try:
exec(decoded_string)
image = Image.open('./images/output.png')
st.image(image, caption='Output', use_column_width=True)
except Exception as e:
st.write('Error - we noted this was fragile! Try again.', e)
except Exception as e:
st.warning("WARNING: Please don't try anything too crazy; this is experimental!")
# sys.exit(1)
# return None, None
@st.cache_resource
def generate_df(columns, n_rows, selected_model):
# Ensure the API key is set outside this function
system_prompt = """You are a medical data expert whose purpose is to generate realistic medical data to populate a dataframe. Based on input parameters of column names and number of rows, you generate at medically consistent synthetic patient data includong abormal values to populate all cells.
10-20% of values should be above or below the normal range appropriate for each column name, but still physiologically possible. For example, SBP could range from 90 to 190. Creatinine might go from 0.5 to 7.0. Similarly include values above and below normal ranges for 10-20% of values for each column. Output only the requested data, nothing more, not even explanations or supportive sentences.
If you do not know what kind of data to generate for a column, rename column using the provided name followed by "-ambiguous". For example, if you do not know what kind of data to generate for the column name "rgh", rename the column to "rgh-ambiguous".
Popululate ambiguous columns with randomly selected 1 or 0 values. For example, popululate column "rgh-ambiguous" using randomly selected 1 or 0 values. For diagnoses provided
as column headers, e.g., "diabetes", populate with randomly selected yes or no values. Populate all cells with appropriate values. No missing values.
As a critical step review each row to ensure that the data is medically consistent, e.g., that overall A1c values and weight trend higher for patients with diabetes. If not, regenerate the row or rows.
Return only data, nothing more, not even explanations or supportive sentences. Generate the requested data so it can be processed by the following code into a dataframe:
```
# Use StringIO to convert the string data into file-like object
data = io.StringIO(response.choices[0].message.content)
# Read the data into a DataFrame, skipping the first row
df = pd.read_csv(data, sep=",", skiprows=1, header=None, names=columns)
```
Your input parameters will be in this format
Columns: ```columns```
Number of rows: ```number```
"""
prompt = f"Columns: {columns}\nNumber of rows: {n_rows}"
try:
with st.spinner("Thinking..."):
client = OpenAI()
response = client.chat.completions.create(
model=selected_model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
temperature=0.5
)
# Assuming the response is CSV-formatted data as a string
data = io.StringIO(response.choices[0].message.content)
# Read the data into a DataFrame
df = pd.read_csv(data, sep=",", header=None)
df.columns = columns # Set the column names
# Convert DataFrame to CSV and create download link
gen_csv = df.to_csv(index=False)
return df, gen_csv
except Exception as e:
st.error(f"An error occurred: {e}")
# Return an empty DataFrame and an empty string to ensure the return type is consistent
return pd.DataFrame(), ""
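# Illustrative usage sketch (the column names and row count are hypothetical, and
# OPENAI_API_KEY must already be set for the API call to succeed):
#   demo_df, demo_csv = generate_df(["Age", "A1c", "SBP", "Diabetes"], 10, "gpt-3.5-turbo")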
def start_chatbot1(selected_model):
# fetch_api_key()
openai.api_key = st.session_state.openai_api_key
client = OpenAI()
st.write("💬 Chatbot Teacher")
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi! Ask me anything about data science and I'll try to answer it."}
]
with st.form("chat_input", clear_on_submit=True):
user_input = st.text_input(label="Your question:", placeholder="e.g., teach me about violin plots")
if st.form_submit_button("Send"):
if user_input:
st.session_state.messages.append({"role": "user", "content": user_input})
try:
with st.spinner("Thinking..."):
response = client.chat.completions.create(model=selected_model, messages=st.session_state.messages)
# Extract the message content and role from the response
# response_message = response.choices[0].message.content
msg_content = response.choices[0].message.content
# msg_role = response.choices[0].message["role"]
st.session_state.messages.append({"role": 'assistant', "content": msg_content})
except Exception as e:
st.exception("An error occurred: {}".format(e))
# Display messages
for msg in st.session_state.messages:
# Generate a unique key for each message
key = f"message_{randint(0, 10000000000)}"
# Call the message function to display the chat messages
message(msg["content"], is_user=msg["role"] == "user", key=key)
@st.cache_data
def generate_table_old(df, categorical_variable):
mytable = TableOne(df,
columns=df.columns.tolist(),
categorical=categorical,
groupby=categorical_variable,
pval=True)
return mytable
@st.cache_data
def generate_table(df, categorical_variable, nonnormal_variables):
# Generate the table using TableOne
mytable = TableOne(df,
columns=df.columns.tolist(),
categorical=categorical,
groupby=categorical_variable,
nonnormal=nonnormal_variables,
pval=True)
return mytable
@st.cache_data
def preprocess_for_pca(df):
included_cols = []
excluded_cols = []
binary_mapping = {} # initialize empty dict for binary mapping
binary_encoded_vars = [] # initialize empty list for binary encoded vars
# Create a binary encoder
bin_encoder = ce.BinaryEncoder()
for col in df.columns:
if pd.api.types.is_categorical_dtype(df[col]) or df[col].dtype == 'object':
unique = df[col].nunique()
# For binary categorical columns
if unique == 2:
most_freq = df[col].value_counts().idxmax()
least_freq = df[col].value_counts().idxmin()
df[col] = df[col].map({most_freq: 0, least_freq: 1})
binary_mapping[col] = {most_freq: 0, least_freq: 1} # add mapping to dict
included_cols.append(col)
# For categorical columns with less than 15 unique values
elif 2 < unique <= 15:
try:
# Perform binary encoding
df_transformed = bin_encoder.fit_transform(df[col])
# Drop the original column from df
df.drop(columns=[col], inplace=True)
# Join the transformed data to df
df = pd.concat([df, df_transformed], axis=1)
# Add transformed columns to binary encoded vars list and included_cols
transformed_cols = df_transformed.columns.tolist()
binary_encoded_vars.extend(transformed_cols)
included_cols.extend(transformed_cols)
except Exception as e:
st.write(f"Failure in encoding {col} due to {str(e)}")
excluded_cols.append(col)
else:
excluded_cols.append(col)
elif np.issubdtype(df[col].dtype, np.number):
included_cols.append(col)
else:
excluded_cols.append(col)
# Display binary mappings and binary encoded variables in streamlit
if binary_mapping:
st.write("Binary Mappings: ", binary_mapping)
if binary_encoded_vars:
st.write("Binary Encoded Variables: ", binary_encoded_vars)
return df[included_cols], included_cols, excluded_cols
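# Illustrative sketch of how columns are handled above (hypothetical column names): a binary
# 'Smoker' column with values 'yes'/'no' is mapped to 0/1 (most frequent value -> 0), a
# 'BloodType' column with 4 categories is binary encoded into several 0/1 columns, and a
# free-text column with more than 15 unique values is excluded from the PCA input.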
@st.cache_data
def create_scree_plot(df):
temp_df_pca, included_cols, excluded_cols = preprocess_for_pca(df)
# Standardize the features
x = StandardScaler().fit_transform(temp_df_pca)
# Create a PCA instance: n_components should be None so variance is preserved from all initial features
pca = PCA(n_components=None)
pca.fit_transform(x)
# Scree plot
fig, ax = plt.subplots()
ax.plot(np.arange(1, len(pca.explained_variance_) + 1), np.cumsum(pca.explained_variance_ratio_))
ax.set_title('Cumulative Explained Variance')
ax.set_xlabel('Number of Components')
ax.set_ylabel('Cumulative Explained Variance Ratio')
st.pyplot(fig)
return fig
@st.cache_data
def perform_pca_plot(df):
st.write("Note: For this PCA analysis, categorical columns with 2 values are mapped to 1 and 0. Categories with more than 2 values have been binary encoded.")
temp_df_pca, included_cols, excluded_cols = preprocess_for_pca(df)
# Standardize the features
x = StandardScaler().fit_transform(temp_df_pca)
# Select the target column for PCA
cols_2_15_unique_vals = [col for col in included_cols if 2 <= df[col].nunique() <= 15]
target_col_pca = st.selectbox("Select the target column for PCA", cols_2_15_unique_vals)
num_unique_targets = df[target_col_pca].nunique() # Calculate the number of unique targets
# Ask the user to request either 2 or 3 component PCA
n_components = st.selectbox("Select the number of PCA components (2 or 3)", [2, 3])
# Create a PCA instance
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(x)
# Depending on user choice, plot the appropriate PCA
if n_components == 2:
principalDf = pd.DataFrame(data=principalComponents, columns=['PC1', 'PC2'])
else:
principalDf = pd.DataFrame(data=principalComponents, columns=['PC1', 'PC2', 'PC3'])
finalDf = pd.concat([principalDf, df[[target_col_pca]]], axis=1)
fig = plt.figure(figsize=(8, 8))
if n_components == 2:
ax = fig.add_subplot(111)
else:
# ax = Axes3D(fig)
ax = plt.axes(projection='3d')
ax.set_zlabel('Principal Component 3', fontsize=15)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title(f'{n_components} component PCA', fontsize=20)
targets = finalDf[target_col_pca].unique().tolist()
colors = sns.color_palette('husl', n_colors=num_unique_targets)
# finalDf
for target, color in zip(targets, colors):
indicesToKeep = finalDf[target_col_pca] == target
if n_components == 2:
ax.scatter(finalDf.loc[indicesToKeep, 'PC1'], finalDf.loc[indicesToKeep, 'PC2'], c=[color], s=50)
else:
ax.scatter(finalDf.loc[indicesToKeep, 'PC1'], finalDf.loc[indicesToKeep, 'PC2'], finalDf.loc[indicesToKeep, 'PC3'], c=[color], s=50)
ax.legend(targets)
# Make a scree plot
# Display the plot using Streamlit
st.pyplot(fig)
st.subheader("Use the PCA Updated Dataset for Machine Learning")
st.write("Download the current plot if you'd like to save it! Then, follow steps to apply machine learning to your PCA modified dataset.")
st.info("Step 1. Click Button to use the PCA Dataset for ML. Step 2. Select Modified Dataframe on left sidebar and switch to the Machine Learning tab. (You'll overfit if you click below again!)")
if st.button("Use PCA Updated dataset on Machine Learning Tab"):
st.session_state.modified_df = finalDf
return fig
@st.cache_data
def display_metrics(y_true, y_pred, y_scores):
# Compute metrics
f1 = f1_score(y_true, y_pred)
accuracy = accuracy_score(y_true, y_pred)
roc_auc = roc_auc_score(y_true, y_scores)
precision, recall, _ = precision_recall_curve(y_true, y_scores)
pr_auc = auc(recall, precision)
# Display metrics
st.info(f"**Your Model Metrics:** F1 score: {f1:.2f}, Accuracy: {accuracy:.2f}, ROC AUC: {roc_auc:.2f}, PR AUC: {pr_auc:.2f}")
with st.expander("Explanations for the Metrics"):
st.write(
# Explain differences
"""
### Explanation of Metrics
- **F1 score** is the harmonic mean of precision and recall, and it tries to balance the two. It is a good metric when you have imbalanced classes.
- **Accuracy** is the ratio of correct predictions to the total number of predictions. It can be misleading if the classes are imbalanced.
- **ROC AUC** (Receiver Operating Characteristic Area Under Curve) represents the likelihood of the classifier distinguishing between a positive sample and a negative sample. It's equal to 0.5 for random predictions and 1.0 for perfect predictions.
- **PR AUC** (Precision-Recall Area Under Curve) is another way of summarizing the trade-off between precision and recall, and it gives more weight to precision. It's useful when the classes are imbalanced.
""")
# st.write(f"Accuracy: {accuracy}")
st.write(plot_confusion_matrix(y_true, y_pred))
with st.expander("What is a confusion matrix?"):
st.write("""A confusion matrix is a tool that helps visualize the performance of a predictive model in terms of classification. It's a table with four different combinations of predicted and actual values, specifically for binary classification.
The four combinations are:
1. **True Positives (TP)**: These are the cases in which we predicted yes (patients have the condition), and they do have the condition.
2. **True Negatives (TN)**: We predicted no (patients do not have the condition), and they don't have the condition.
3. **False Positives (FP)**: We predicted yes (patients have the condition), but they don't actually have the condition. Also known as "Type I error" or "False Alarm".
4. **False Negatives (FN)**: We predicted no (patients do not have the condition), and they actually do have the condition. Also known as "Type II error" or "Miss".
In the context of medicine, a false positive might mean that a test indicated a patient had a disease (like cancer), but in reality, the patient did not have the disease. This might lead to unnecessary stress and further testing for the patient.
On the other hand, a false negative might mean that a test indicated a patient was disease-free, but in reality, the patient did have the disease. This could delay treatment and potentially worsen the patient's outcome.
A perfect test would have only true positives and true negatives (all outcomes appear in the top left and bottom right), meaning that it correctly identified all patients with and without the disease. Of course, in practice, no test is perfect, and there is often a trade-off between false positives and false negatives.
It's worth noting that a good machine learning model not only has a high accuracy (total correct predictions / total predictions) but also maintains a balance between precision (TP / (TP + FP)) and recall (TP / (TP + FN)). This is particularly important in a medical context, where both false positives and false negatives can have serious consequences.
Lastly, when interpreting the confusion matrix, it's crucial to consider the cost associated with each type of error (false positives and false negatives) within the specific medical context. Sometimes, it's more crucial to minimize one type of error over the other. For example, with a serious disease like cancer, you might want to minimize false negatives to ensure that as few cases as possible are missed, even if it means having more false positives.
""")
st.write(plot_roc_curve(y_true, y_scores))
with st.expander("What is an ROC curve?"):
st.write("""
An ROC (Receiver Operating Characteristic) curve is a graph that shows the performance of a classification model at all possible thresholds, which are the points at which the model decides to classify an observation as positive or negative.
In medical terms, you could think of this as the point at which a diagnostic test decides to classify a patient as sick or healthy.
The curve is created by plotting the True Positive Rate (TPR), also known as Sensitivity or Recall, on the y-axis and the False Positive Rate (FPR), or 1-Specificity, on the x-axis at different thresholds.
In simpler terms:
- **True Positive Rate (TPR)**: Out of all the actual positive cases (for example, all the patients who really do have a disease), how many did our model correctly identify?
- **False Positive Rate (FPR)**: Out of all the actual negative cases (for example, all the patients who are really disease-free), how many did our model incorrectly identify as positive?
The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test. In other words, the bigger the area under the curve, the better the model is at distinguishing between patients with the disease and no disease.
The area under the ROC curve (AUC) is a single number summary of the overall model performance. The value can range from 0 to 1, where:
- **AUC = 0.5**: This is no better than a random guess, or flipping a coin. It's not an effective classifier.
- **AUC < 0.5**: This means the model is worse than a random guess. But, by reversing its decision, we can get AUC > 0.5.
- **AUC = 1**: The model has perfect accuracy. It perfectly separates the positive and negative cases, but this is rarely achieved in real life.
In clinical terms, an AUC of 0.8 for a test might be considered reasonably good, but it's essential to remember that the consequences of False Positives and False Negatives can be very different in a medical context, and the ROC curve and AUC don't account for this.
Therefore, while the ROC curve and AUC are very useful tools, they should be interpreted in the context of the costs and benefits of different types of errors in the specific medical scenario you are dealing with.""")
st.write(plot_pr_curve(y_true, y_scores))
with st.expander("What is a PR curve?"):
st.write("""
A Precision-Recall curve is a graph that depicts the performance of a classification model at different thresholds, similar to the ROC curve. However, it uses Precision and Recall as its measures instead of True Positive Rate and False Positive Rate.
In the context of medicine:
- **Recall (or Sensitivity)**: Out of all the actual positive cases (for example, all the patients who really do have a disease), how many did our model correctly identify? It's the ability of the test to find all the positive cases.
- **Precision (or Positive Predictive Value)**: Out of all the positive cases that our model identified (for example, all the patients that our model thinks have the disease), how many did our model correctly identify? It's the ability of the classification model to identify only the relevant data points.
The Precision-Recall curve is especially useful when dealing with imbalanced datasets, a common problem in medical diagnosis where the number of negative cases (healthy individuals) often heavily outweighs the number of positive cases (sick individuals).
A model with perfect precision (1.0) and recall (1.0) will have a curve that reaches to the top right corner of the plot. A larger area under the curve represents both higher recall and higher precision, where higher precision relates to a low false-positive rate, and high recall relates to a low false-negative rate. High scores for both show that the classifier is returning accurate results (high precision), and returning a majority of all positive results (high recall).
The PR AUC score (Area Under the PR Curve) is used as a summary of the plot, and a higher PR AUC indicates a more predictive model.
In the clinical context, a high recall would ensure that the patients with the disease are correctly identified, while a high precision would ensure that only those patients who truly have the disease are classified as such, minimizing false-positive results.
However, there is usually a trade-off between precision and recall. Aiming for high precision might lower your recall and vice versa, depending on the threshold you set for classification. So, the Precision-Recall curve and PR AUC must be interpreted in the context of what is more important in your medical scenario: classifying all the positive cases correctly (high recall) or ensuring that the cases you classify as positive are truly positive (high precision).""")
@st.cache_data
def plot_pr_curve(y_true, y_scores):
precision, recall, _ = precision_recall_curve(y_true, y_scores)
pr_auc = auc(recall, precision)
fig, ax = plt.subplots()
ax.plot(recall, precision, label=f'PR curve (AUC = {pr_auc:.2f})')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve')
plt.legend(loc="lower right")
st.pyplot(fig)
@st.cache_data
def get_categorical_and_numerical_cols(df):
# Initialize empty lists for categorical and numerical columns
categorical_cols = []
numeric_cols = []
# Go through each column in the dataframe
for col in df.columns:
# If the column data type is numerical and has more than two unique values, add it to the numeric list
if np.issubdtype(df[col].dtype, np.number) and len(df[col].unique()) > 2:
numeric_cols.append(col)
# Otherwise, add it to the categorical list
else:
categorical_cols.append(col)
# Sort the lists
numeric_cols.sort()
categorical_cols.sort()
return numeric_cols, categorical_cols
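# Illustrative sketch (hypothetical dataframe): a numeric 'Age' column with many distinct values
# goes to numeric_cols, while a string 'Sex' column and a 0/1 'Diabetes' flag (only two unique
# values) both go to categorical_cols.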
@st.cache_data
def plot_confusion_matrix_old(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(dpi=100) # Set DPI for better clarity
# Plot the heatmap
sns.heatmap(cm, annot=True, fmt="d", cmap='Blues', ax=ax, annot_kws={"size": 16}) # Set font size
# Labels, title, and ticks
ax.set_ylabel('Actual', fontsize=12)
ax.set_xlabel('Predicted', fontsize=12)
ax.set_title('Confusion Matrix', fontsize=14)
# Fix for the bottom cells getting cut off
plt.subplots_adjust(bottom=0.2)
return fig
@st.cache_data
def plot_confusion_matrix(y_true, y_pred):
# Compute the confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Create the ConfusionMatrixDisplay object
cmd = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Class 0', 'Class 1'])
# Create a new figure and axis for the plot
fig, ax = plt.subplots(dpi=100)
# Plot the confusion matrix using the `plot` method
cmd.plot(ax=ax, cmap='Blues', values_format='d')
# Customize the plot if needed
ax.set_title('Confusion Matrix')
ax.set_xlabel('Predicted')
ax.set_ylabel('Actual')
return fig
@st.cache_data
def plot_roc_curve(y_true, y_scores):
fpr, tpr, _ = roc_curve(y_true, y_scores)
roc_auc = roc_auc_score(y_true, y_scores)
fig, ax = plt.subplots()
ax.plot(fpr, tpr, label='ROC curve (AUC = %0.2f)' % roc_auc)
ax.plot([0, 1], [0, 1], 'k--', label='Random guess')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.xlim([-0.02, 1])
plt.ylim([0, 1.02])
plt.legend(loc="lower right")
return fig
@st.cache_data
def preprocess(df, target_col):
included_cols = []
excluded_cols = []
for col in df.columns:
if col != target_col: # Exclude target column from preprocessing
if df[col].dtype == 'object':
if len(df[col].unique()) == 2: # Bivariate case
most_freq = df[col].value_counts().idxmax()
least_freq = df[col].value_counts().idxmin()
                    # Map 'F' to 0 and 'M' to 1 explicitly when present; otherwise map by frequency
                    if set(df[col].unique()) == {'F', 'M'}:
                        df[col] = df[col].map({'F': 0, 'M': 1})
                    else:
                        df[col] = df[col].map({most_freq: 0, least_freq: 1})
included_cols.append(col)
else: # Multivariate case
excluded_cols.append(col)
elif df[col].dtype in ['int64', 'float64']: # Numerical case
if df[col].isnull().values.any():
mean_imputer = SimpleImputer(strategy='mean')
df[col] = mean_imputer.fit_transform(df[[col]])
st.write(f"Imputed missing values in {col} with mean.")
included_cols.append(col)
return df[included_cols], included_cols, excluded_cols
@st.cache_data
def preprocess_old(df, target_col):
included_cols = []
excluded_cols = []
for col in df.columns:
if col != target_col: # Exclude target column from preprocessing
if df[col].dtype == 'object':
if len(df[col].unique()) == 2: # Bivariate case
most_freq = df[col].value_counts().idxmax()
least_freq = df[col].value_counts().idxmin()
df[col] = df[col].map({most_freq: 0, least_freq: 1})
included_cols.append(col)
else: # Multivariate case
excluded_cols.append(col)
elif df[col].dtype in ['int64', 'float64']: # Numerical case
if df[col].isnull().values.any():
mean_imputer = SimpleImputer(strategy='mean')
df[col] = mean_imputer.fit_transform(df[[col]])
st.write(f"Imputed missing values in {col} with mean.")
included_cols.append(col)
# st.write(f"Included Columns: {included_cols}")
# st.write(f"Excluded Columns: {excluded_cols}")
return df[included_cols], included_cols, excluded_cols
@st.cache_data
def create_boxplot(df, numeric_col, categorical_col, show_points=False):
if numeric_col and categorical_col:
fig, ax = plt.subplots()
# Plot the notched box plot
sns.boxplot(x=categorical_col, y=numeric_col, data=df, notch=True, ax=ax)
if show_points:
# Add the actual data points on the plot
sns.swarmplot(x=categorical_col, y=numeric_col, data=df, color=".25", ax=ax)
# Add a title to the plot
ax.set_title(f'Box Plot of {numeric_col} by {categorical_col}')
st.pyplot(fig)
return fig
@st.cache_data
def create_violinplot(df, numeric_col, categorical_col):
if numeric_col and categorical_col:
fig, ax = plt.subplots()
# Plot the violin plot
sns.violinplot(x=categorical_col, y=numeric_col, data=df, ax=ax)
# Add a title to the plot
ax.set_title(f'Violin Plot of {numeric_col} by {categorical_col}')
st.pyplot(fig)
return fig
@st.cache_data
def create_scatterplot(df, scatter_x, scatter_y):
if scatter_x and scatter_y:
fig, ax = plt.subplots()
# Plot the scatter plot
sns.regplot(x=scatter_x, y=scatter_y, data=df, ax=ax)
# Calculate the slope and intercept of the regression line
slope, intercept = np.polyfit(df[scatter_x], df[scatter_y], 1)
# Add the slope and intercept as a text annotation on the plot
ax.text(0.05, 0.95, f'y={slope:.2f}x+{intercept:.2f}', transform=ax.transAxes)
ax.set_title("Scatter Plot for " + scatter_y + " vs " + scatter_x)
st.pyplot(fig)
with st.expander('What is a scatter plot?'):
st.write("""
A scatterplot is a type of plot that displays values for typically two variables for a set of data. It's used to visualize the relationship between two numerical variables, where one variable is on the x-axis and the other variable is on the y-axis. Each point on the plot represents an observation in your dataset.
**Which types of variables are appropriate for the x and y axes?**
Both the x and y axes of a scatterplot are typically numerical variables. For example, one might be "Patient Age" (on the x-axis) and the other might be "Blood Pressure" (on the y-axis). Each dot on the scatterplot then represents a patient's age and corresponding blood pressure.
However, the variables used do not have to be numerical. They could be ordinal categories, such as stages of a disease, which have a meaningful order.
The choice of which variable to place on each axis doesn't usually matter much for exploring relationships, but traditionally the independent variable (the one you control or think is influencing the other) is placed on the x-axis, and the dependent variable (the one you think is being influenced) is placed on the y-axis.
**What does a regression line mean when added to a scatterplot?**
A regression line (or line of best fit) is a straight line that best represents the data on a scatter plot. This line may pass through some of the points, none of the points, or all of the points. It's a way of modeling the relationship between the x and y variables.
In the context of a scatterplot, the regression line is used to identify trends and patterns between the two variables. If the data points and the line are close, it suggests a strong correlation between the variables.
The slope of the regression line also tells you something important: for every unit increase in the variable on the x-axis, the variable on the y-axis changes by the amount of the slope. For example, if we have patient age on the x-axis and blood pressure on the y-axis, and the slope of the line is 2, it would suggest that for each year increase in age, we expect blood pressure to increase by 2 units, on average.
However, keep in mind that correlation does not imply causation. Just because two variables move together, it doesn't mean that one is causing the other to change.
For medical students, think of scatterplots as a way to visually inspect the correlation between two numerical variables. It's a way to quickly identify patterns, trends, and outliers, and to formulate hypotheses for further testing.""")
return fig
# Function to replace missing values
@st.cache_data
def replace_missing_values(df, method):
# Differentiate numerical and categorical columns
num_cols = df.select_dtypes(include=np.number).columns.tolist()
cat_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
if method == 'drop':
df = df.dropna()
elif method == 'zero':
df[num_cols] = df[num_cols].fillna(0)
elif method == 'mean':
df[num_cols] = df[num_cols].fillna(df[num_cols].mean())
elif method == 'median':
df[num_cols] = df[num_cols].fillna(df[num_cols].median())
elif method == 'mode':
df[cat_cols] = df[cat_cols].fillna(df[cat_cols].mode().iloc[0])
elif method == 'mice':
imp = mice.MICEData(df[num_cols]) # only apply to numerical columns
df[num_cols] = imp.data
st.session_state.df = df
return df
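# Behavior summary for the methods above: 'drop' removes rows with any missing value;
# 'zero', 'mean', and 'median' fill only numeric columns; 'mode' fills only categorical
# columns; 'mice' imputes numeric columns using the imported MICEData helper.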
# This function will be cached
@st.cache_data
def load_data(file_path):
data = pd.read_csv(file_path)
return data
@st.cache_data
def analyze_dataframe(df):
# Analyzing missing values
missing_values = df.isnull().sum()
    # Analyzing outliers in numeric columns using the Z-score
    # (you might want to use a different method for identifying outliers)
    numeric_df = df.select_dtypes(include=[np.number])
    z_scores = np.abs((numeric_df - numeric_df.mean()) / numeric_df.std())
    outliers = (z_scores > 3).sum()
# Analyzing data types
data_types = df.dtypes
# Analyzing skewness for numeric columns
skewness = df.select_dtypes(include=[np.number]).apply(lambda x: x.skew())
# Analyzing cardinality in categorical columns
cardinality = df.select_dtypes(include=['object', 'category']).nunique()
return missing_values, outliers, data_types, skewness, cardinality
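# Note: the outlier count above flags values more than 3 standard deviations from the column
# mean. Illustrative example (hypothetical numbers): for a column with mean 100 and standard
# deviation 10, a value of 135 has |z| = 3.5 and would be counted as an outlier.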
# Function to plot pie chart
def plot_pie(df, col_name):
plt.figure(figsize=(10, 8)) # set the size of the plot
df[col_name].value_counts().plot(kind='pie', autopct='%1.1f%%')
# Add title
plt.title(f'Distribution for {col_name}')
return plt
# Function to summarize categorical data
@st.cache_data
def summarize_categorical(df):
# Select only categorical columns
cat_df = df.select_dtypes(include=['object', 'category'])
# If there are no categorical columns, return None
if cat_df.empty:
st.write("The DataFrame does not contain any categorical columns.")
return None
# Create a list to store dictionaries for each column's summary
summary_data = []
for col in cat_df.columns:
# Number of unique values
unique_count = df[col].nunique()
# Most frequent category and its frequency
most_frequent = df[col].mode()[0]
freq_most_frequent = df[col].value_counts().iloc[0]
# Append the column summary as a dictionary to the list
summary_data.append({
'column': col,
'unique_count': unique_count,
'most_frequent': most_frequent,
'frequency_most_frequent': freq_most_frequent,
})
# Create the summary DataFrame from the list of dictionaries
summary = pd.DataFrame(summary_data)
summary.set_index('column', inplace=True)
return summary
# Function to plot correlation heatmap
def plot_corr(df):
df_copy = df.copy()
for col in df_copy.columns:
if df_copy[col].dtype == 'object': # Check if the column is categorical
unique_vals = df_copy[col].unique()
if len(unique_vals) == 2: # If the categorical variable has exactly 2 unique values
value_counts = df_copy[col].value_counts()
df_copy[col] = df_copy[col].map({value_counts.idxmax(): 0, value_counts.idxmin(): 1})
# Keep only numerical and binary categorical columns
df_copy = df_copy.select_dtypes(include=[np.number])
corr = df_copy.corr() # Compute pairwise correlation of columns
plt.figure(figsize=(12, 10)) # Set the size of the plot
sns.heatmap(corr, annot=True, cmap='coolwarm', cbar=True)
plt.title('Correlation Heatmap')
return plt
@st.cache_resource
def make_profile(df):
return ProfileReport(df, title="Profiling Report")
# Function to plot bar chart
def plot_categorical(df, col_name):
# Get frequency of categories
freq = df[col_name].value_counts()
# Create bar chart
plt.figure(figsize=(10, 6)) # set the size of the plot
plt.bar(freq.index, freq.values)
# Add title and labels
plt.title(f'Frequency of Categories for {col_name}')
plt.xlabel('Category')
plt.ylabel('Frequency')
return plt
def plot_numeric(df, col_name):
plt.figure(figsize=(10, 6)) # set the size of the plot
plt.hist(df[col_name], bins=30, alpha=0.5, color='blue', edgecolor='black')
# Add title and labels
plt.title(f'Distribution for {col_name}')
plt.xlabel(col_name)
plt.ylabel('Frequency')
return plt
@st.cache_data
def process_dataframe(df):
# Iterating over each column
for col in df.columns:
# Checking if the column is of object type (categorical)
if df[col].dtype == 'object':
# Getting unique values in the column
unique_values = df[col].unique()
# If the column has exactly 2 unique values
if len(unique_values) == 2:
# Counting the occurrences of each value
value_counts = df[col].value_counts()
# Getting the most and least frequent values
most_frequent = value_counts.idxmax()
least_frequent = value_counts.idxmin()
# Replacing the values and converting to integer
df[col] = df[col].replace({most_frequent: 0, least_frequent: 1}).astype(int)
return df
st.title("AutoAnalyzer")
with st.expander('Please Read: Using AutoAnalyzer'):
st.info("""Be sure your data is first in a 'tidy' format. Use the demo datasets for examples. (*See https://tidyr.tidyverse.org/ for more information.*)
Follow the steps listed in the sidebar on the left. After your exploratory analysis is complete, try the machine learning tab to see if you can predict a target variable.""")
st.warning("This is not intended to be a comprehensive tool for data analysis. It is meant to be a starting point for data exploration and machine learning. Do not upload PHI. Clone the Github repository and run locally without the chatbot if you have PHI.")
st.markdown('[Github Repository](https://github.com/DrDavidL/auto_analyze)')
# """)
st.write("Author: David Liebovitz, MD, Northwestern University")
st.write("Last updated 8/28/23")
api_key = fetch_api_key()
if api_key:
os.environ['OPENAI_API_KEY'] = api_key
tab1, tab2 = st.tabs(["Data Exploration", "Machine Learning"])
gpt_version = st.sidebar.radio("Select GPT model:", ("GPT-3.5 ($)", "GPT-3.5 16k ($$)", "GPT-4 ($$$$)"), index=0)
if gpt_version == "GPT-3.5 ($)":
selected_model ="gpt-3.5-turbo"
if gpt_version == "GPT-4 ($$$$)":
selected_model = "gpt-4"
if gpt_version == "GPT-3.5 16k ($$)":
selected_model = "gpt-3.5-turbo-16k"
# if openai.api_key is None:
# os.environ["OPENAI_API_KEY"] = fetch_api_key()
# openai.api_key = os.getenv("OPENAI_API_KEY")
with tab1:
# st.sidebar.subheader("Upload your data")
st.sidebar.subheader("Step 1: Upload your data or view a demo dataset")
demo_or_custom = st.sidebar.selectbox("Upload a CSV file. NO PHI - use only anonymized data", ("Demo 1 (diabetes)", "Demo 2 (cancer)", "Demo 3 (missing data example)", "Demo 4 (time series -CHF deaths)", "Demo 5 (stroke)", "Generate Data", "CSV Upload", "Modified Dataframe"), index = 0)
if demo_or_custom == "CSV Upload":
uploaded_file = st.sidebar.file_uploader("Choose a CSV file", type="csv")
if uploaded_file:
st.session_state.df = load_data(uploaded_file)
if demo_or_custom == 'Demo 1 (diabetes)':
file_path = "data/predictdm.csv"
st.sidebar.markdown("[About Demo 1 dataset](https://data.world/informatics-edu/diabetes-prediction)")
st.session_state.df = load_data(file_path)
if demo_or_custom == 'Demo 2 (cancer)':
file_path = "data/breastcancernew.csv"
st.sidebar.write("[About Demo 2 dataset](https://data.world/marshalldatasolution/breast-cancer)")
st.session_state.df = load_data(file_path)
if demo_or_custom == 'Demo 3 (missing data example)':
file_path = "data/missing_data.csv"
st.sidebar.markdown("[About Demo 3 dataset](https://www.lshtm.ac.uk/research/centres-projects-groups/missing-data#dia-missing-data)")
st.session_state.df = load_data(file_path)
if demo_or_custom == 'Modified Dataframe':
# st.sidebar.markdown("Using the dataframe from the previous step.")
if len(st.session_state.modified_df) == 0:
st.sidebar.warning("No saved dataframe; using demo dataset 1.")
file_path = "data/predictdm.csv"
st.sidebar.markdown("[About Demo 1 dataset](https://data.world/informatics-edu/diabetes-prediction)")
st.session_state.df = load_data(file_path)
else:
st.session_state.df = st.session_state.modified_df
# st.sidebar.write("Download the modified dataframe as a CSV file.")
modified_csv = st.session_state.modified_df.to_csv(index=False)
st.sidebar.download_button(
label="Download Modified Dataset!",
data=modified_csv,
file_name="modified_data.csv",
mime="text/csv",
)
if demo_or_custom == 'Generate Data':
if check_password():
user_input = st.sidebar.text_area("Enter comma or space separated names for columns, e.g., Na, Cr, WBC, A1c, SPB, Diabetes:")
if "," in user_input:
user_list = user_input.split(",")
elif " " in user_input:
user_list = user_input.split()
else:
user_list = [user_input]
# Remove leading/trailing whitespace from each item in the list
user_columns = [item.strip() for item in user_list]
user_rows = st.sidebar.number_input("Enter approx number of rows (max 100).", min_value=1, max_value=100, value=10, step=1)
if st.sidebar.button("Generate Data"):
st.session_state.df, st.session_state.gen_csv = generate_df(user_columns, user_rows, selected_model)
st.info("Here are the first 5 rows of your generated data. Use the tools in the sidebar to explore your new dataset! And, download and save your new CSV file from the sidebar!")
st.write(st.session_state.df.head())
if demo_or_custom == 'Demo 4 (time series -CHF deaths)':
file_path = "data/S1Data.csv"
st.sidebar.markdown("[About Demo 4 dataset](https://plos.figshare.com/articles/dataset/Survival_analysis_of_heart_failure_patients_A_case_study/5227684/1)")
st.session_state.df = load_data(file_path)
if demo_or_custom == 'Demo 5 (stroke)':
file_path = "data/healthcare-dataset-stroke-data.csv"
st.sidebar.markdown("[About Demo 5 dataset](https://www.kaggle.com/fedesoriano/stroke-prediction-dataset)")
st.session_state.df = load_data(file_path)
with st.sidebar:
if st.session_state.gen_csv is not None:
# st.warning("Save your generated data!")
st.download_button(
label="Download Generated Data!",
data=st.session_state.gen_csv,
file_name="patient_data.csv",
mime="text/csv",
)
st.subheader("Step 2: Assess Data Readiness")
check_preprocess = st.checkbox("Assess dataset readiness", key = "Preprocess now needed")
needs_preprocess = st.checkbox("Select if dataset fails readiness", key = "Open Preprocess")
filter_data = st.checkbox("Filter data if needed (Switch to Modified Dataframe after filtering)", key = "Filter data")
st.subheader("Step 3: Tools for Analysis")
col1, col2 = st.columns(2)
with col1:
header = st.checkbox("Show header (top 5 rows of data)", key = "show header")
summary = st.checkbox("Summary (numerical data)", key = "show data")
summary_cat = st.checkbox("Summary (categorical data)", key = "show summary cat")
show_table = st.checkbox("Create a Table 1", key = "show table")
show_scatter = st.checkbox("Scatterplot", key = "show scatter")
view_full_df = st.checkbox("View Dataset", key = "view full df")
binary_categ_analysis = st.checkbox("Categorical outcome analysis (Cohort or case-control datasets)", key = "binary categ analysis")
activate_chatbot = st.checkbox("Activate Chatbot (select specific bot on main window)", key = "activate chatbot")
with col2:
barchart = st.checkbox("Bar chart (categorical data)", key = "show barchart")
histogram = st.checkbox("Histogram (numerical data)", key = "show histogram")
piechart = st.checkbox("Pie chart (categorical data)", key = "show piechart")
show_corr = st.checkbox("Correlation heatmap", key = "show corr")
box_plot = st.checkbox("Box plot", key = "show box")
violin_plot = st.checkbox("Violin plot", key = "show violin")
mult_linear_reg = st.checkbox("Multiple linear regression", key = "show mult linear reg")
perform_pca = st.checkbox("Perform PCA", key = "show pca")
survival_curve = st.checkbox("Survival curve (need duration column)", key = "show survival")
cox_ph = st.checkbox("Cox Proportional Hazards (need duration column)", key = "show cox ph")
full_analysis = st.checkbox("*(Takes 1-2 minutes*) **Download a Full Analysis** (*Check **Alerts** with key findings.*)", key = "show analysis")
if filter_data:
current_df = st.session_state.df
st.session_state.modified_df = filter_dataframe(current_df)
st.write("Switch to Modified Dataframe (top left) to see the filtered data below and use in analysis tools.")
st.session_state.modified_df
if mult_linear_reg:
st.subheader("Multiple Linear Regression")
st.warning("This tool is for use with numerical data only; binary categorical variables are updated to 1 and 0 and explained below if needed.")
# Get column names for time and event from the user
temp_df_mlr = st.session_state.df.copy()
numeric_columns_mlr = all_numerical(temp_df_mlr)
x_col = st.multiselect('Select the columns for x', numeric_columns_mlr, numeric_columns_mlr[1])
y_col = st.selectbox('Select the column for y', numeric_columns_mlr)
# Convert the columns to numeric values
# temp_df_mlr[x_col] = temp_df_mlr[x_col].astype(float)
# temp_df_mlr[y_col] = temp_df_mlr[y_col].astype(float)
# x_col_array = np.array(x_col)
# y_col_array = np.array(y_col)
# x_col_reshaped = x_col_array.reshape(-1, 1)
# y_col_reshaped = y_col_array.reshape(-1, 1)
# Plot the survival curve
try:
mult_linear_reg, mlr_report, intercept, coef = plot_mult_linear_reg(temp_df_mlr, temp_df_mlr[x_col], temp_df_mlr[y_col])
mlr_equation = generate_regression_equation(intercept, coef, x_col)
show_equation = st.checkbox("Show regression equation")
# mlr_report
if show_equation:
st.write(mlr_equation)
st.write("Download your cooefficients and intercept below.")
format_mlr = st.radio("Select the format for your report:", ('csv', 'json', 'html', ), key = 'formal_mlr', horizontal = True, )
df_download_options(mlr_report, 'Your Multiple Linear Regression', format_mlr)
except:
st.error("Please select at least one column for x and one column for y.")
# save_image(mult_linear_reg, 'mult_linear_reg.png')
# df_download_options(mult_linear_reg, 'csv')
with st.expander("What is a Multiple Linear Regression?"):
st.write(mult_linear_reg_explanation)
if cox_ph:
df = st.session_state.df
# Select Predictor Columns
st.markdown("## Cox Analysis: Select Columns")
categ_columns_cox = all_categorical(df)
numeric_columns_cox = all_numerical(df)
event_col = st.selectbox('Select the event column', categ_columns_cox, key='event_col')
selected_columns_cox = st.multiselect("Choose your feature columns", numeric_columns_cox)
duration_col = st.selectbox('Select the duration column', numeric_columns_cox)
if st.button("Analyze", key="analyze"):
if len(selected_columns_cox) < 1:
st.error("Select at least one column!")
else:
# Shift DataFrame to Selected Columns
cph_data = df[selected_columns_cox + [event_col] + [duration_col]]
# Define Event & Duration Columns here
# Assuming 'event' as Event Column & 'duration' as Duration Column
# Please change as per your data
# st.write(duration_col)
# st.write(cph_data[duration_col])
# st.write(cph_data)
cph = CoxPHFitter(penalizer=0.1)
cph.fit(cph_data, duration_col=duration_col, event_col=event_col)
summary_cox = cph.summary
st.session_state.df_to_download = summary_cox
# st.session_state.df_to_download = summary_df
st.subheader("Summary of the Cox PH Analysis")
st.info("Note, the exp(coef) column is the hazard ratio for each variable.")
# Display summary DataFrame
st.dataframe(summary_cox)
else:
st.text("Select columns & hit 'Analyze'.")
if st.session_state.df_to_download is not None:
format_cox = st.radio("Select the format for your report:", ('csv', 'json', 'html', ), key ='format_cox', horizontal = True, )
df_download_options(st.session_state.df_to_download, 'cox_ph_summary', format_cox)
with st.expander("What is a Cox Proportional Hazards Analysis?"):
st.write(cox)
if survival_curve:
# Get column names for time and event from the user
st.subheader("Survival Curve")
st.warning("This tool is for use with survival analysis data. Any depiction will not make sense if 'time' isn't a column for your dataset")
time_col = st.selectbox('Select the column for time', st.session_state.df.columns)
event_col = st.selectbox('Select the column for event', st.session_state.df.columns)
# Plot the survival curve
surv_curve = plot_survival_curve(st.session_state.df, time_col, event_col)
save_image(surv_curve, 'survival_curve.png')
with st.expander("What is a Kaplan-Meier Curve?"):
st.write(kaplan_meier)
if binary_categ_analysis:
st.subheader("""
Choose your exposures and outcomes.
""")
st.info('Note - categories with more than 15 unique values will not be used.')
var1, var2 = st.columns(2)
s_categorical_cols = st.session_state.df.select_dtypes(include=['object']).columns.tolist()
numeric_cols = [col for col in st.session_state.df.columns if st.session_state.df[col].nunique() == 2 and st.session_state.df[col].dtype != 'object']
filtered_categorical_cols = [col for col in s_categorical_cols if st.session_state.df[col].nunique() <= 15]
sd_categorical_cols = filtered_categorical_cols + numeric_cols
if len(sd_categorical_cols) > 1:
sd_exposure = var1.selectbox('Select a categorical column as the exposure:', sd_categorical_cols, index = 0)
sd_outcome = var2.selectbox('Select a categorical column as the outcome:', sd_categorical_cols, index = 1)
sd_exposure_values = var1.multiselect('Select one or more values for the exposure:', st.session_state.df[sd_exposure].unique().tolist(), [st.session_state.df[sd_exposure].unique().tolist()[1]])
sd_outcome_values = var2.multiselect('Select one or more values for the outcome:', st.session_state.df[sd_outcome].unique().tolist(), [st.session_state.df[sd_outcome].unique().tolist()[1]])
# Create a temporary dataframe to store the modified values
temp_df = st.session_state.df.copy()
# Replace the selected exposure values with 1 and others with 0
temp_df[sd_exposure] = temp_df[sd_exposure].apply(lambda x: 1 if x in sd_exposure_values else 0)
# Replace the selected outcome values with 1 and others with 0
temp_df[sd_outcome] = temp_df[sd_outcome].apply(lambda x: 1 if x in sd_outcome_values else 0)
cohort_or_case = st.radio("Choose an approach", ("Cohort Study", "Case Control Study"))
# Generate the 2x2 table
table = generate_2x2_table(temp_df, sd_exposure, sd_outcome)
if cohort_or_case == "Cohort Study":
st.write("For use with cohort study data.")
# Calculate relative risk, ARR, and NNT
tn = table.iloc[0, 0]
fp = table.iloc[0, 1]
fn = table.iloc[1, 0]
tp = table.iloc[1, 1]
rr, arr, nnt = calculate_rr_arr_nnt(tn, fp, fn, tp)
# Display the 2x2 table and analysis results
st.subheader("2x2 Table")
st.write(table)
st.subheader("Results")
st.write("Relative Risk (RR):", round(rr,2))
st.write("Absolute Risk Reduction (ARR):", round(arr,2))
st.write("Number Needed to Treat (NNT):", round(nnt, 2))
if cohort_or_case == "Case Control Study":
st.write("For use with case-control data.")
# Calculate odds and odds ratio
odds_cases, odds_controls, odds_ratio = calculate_odds(table)
# Display the 2x2 table and analysis results
st.subheader("2x2 Table")
st.write(table)
st.subheader("Results")
st.write("Odds in cases:", round(odds_cases,2))
st.write("Odds in controls:", round(odds_controls, 2))
st.write("Odds Ratio:", round(odds_ratio, 2))
else:
st.subheader("Insufficient categorical variables found in the data.")
if needs_preprocess:
st.info("Data Preprocessing Tools - *Assess Data Readiness **first**. Use only if needed.*")
st.write("Step 1: Make a copy of your dataset to modify by clicking the button below.")
if st.button("Copy dataset"):
st.session_state.modified_df = st.session_state.df.copy()
st.write("Step 2: Select 'Modified Dataframe' in Step 1 of the sidebar to use the dataframe you just copied.")
st.write("Step 3: Select a method to impute missing values in your dataset. Built in checks to apply only to applicable data types.")
method = st.selectbox("Choose a method to replace missing values", ("Select here!", "drop", "zero", "mean", "median", "mode", "mice"))
if st.button('Apply the Method to Replace Missing Values'):
st.session_state.modified_df = replace_missing_values(st.session_state.modified_df, method)
st.write("Recheck data readiness to see if you are ready to proceed with analysis.")
if activate_chatbot:
st.subheader("Chatbot Teacher")
st.warning("First be sure to activate the right chatbot for your needs.")
chat_context = st.radio("Choose an approach", ("Ask questions (no plots)", "Generate Plots", "Teach about data science"))
try:
x = st.session_state.df
except NameError:
st.warning("Please upload a CSV file or choose a demo dataset")
else:
if activate_chatbot:
if check_password():
if chat_context == "Teach about data science":
start_chatbot1(selected_model)
if chat_context == "Ask questions (no plots)":
start_chatbot2(st.session_state.df, selected_model, key = "chatbot2 main")
if chat_context == "Generate Plots":
if selected_model == "gpt-3.5-turbo":
start_chatbot3(st.session_state.df, selected_model)
if selected_model == "gpt-3.5-turbo-16k":
start_chatbot3(st.session_state.df, selected_model)
if selected_model == "gpt-4":
start_plot_gpt4(st.session_state.df)
if summary:
st.info("Summary of numerical data")
sum_num_data =st.session_state.df.describe()
st.write(sum_num_data)
st.session_state.df_to_download = sum_num_data
if st.session_state.df_to_download is not None:
format_summary_num = st.radio("Select the format for your report:", ('csv', 'json', 'html', ), key = 'summary_num', horizontal = True, )
df_download_options(st.session_state.df_to_download, 'numerical_data_summary', format_summary_num)
if header:
st.info("First 5 Rows of Data")
st.write(st.session_state.df.head())
if full_analysis:
st.info("Full analysis of data")
with st.spinner("Working on the analysis..."):
st.session_state.full_profile = make_profile(st.session_state.df)
# profile = ProfileReport(df, title="Profiling Report")
st.write(f'Since this file is large, please download and then open the full report.')
st.download_button(
label="Download report",
data=st.session_state.full_profile.to_html(),
file_name='full_report.html',
mime='text/html',
)
# st_profile_report(profile)
if histogram:
st.info("Histogram of data")
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
selected_col = st.selectbox("Choose a column", numeric_cols, key = "histogram")
if selected_col:
plt = plot_numeric(st.session_state.df, selected_col)
st.pyplot(plt)
save_image(plt, 'histogram.png')
with st.expander("Expand for Python|Streamlit Code"):
st.code("""
import pandas as pd
import matplotlib.pyplot as plt
# Function to get categorical and numerical columns from a dataframe
def get_categorical_and_numerical_cols(df):
numeric_cols = df.select_dtypes(include=['int', 'float']).columns.tolist()
categorical_cols = df.select_dtypes(include=['object']).columns.tolist()
return numeric_cols, categorical_cols
# Function to plot a histogram of a selected numeric column
def plot_numeric(df, column):
plt.figure(figsize=(10, 6))
plt.hist(df[column], bins=20, color='skyblue')
plt.xlabel(column)
plt.ylabel('Frequency')
plt.title(f'Histogram of {column}')
return plt
# Load your data into a dataframe
df = pd.read_csv("your_data.csv")
print("Histogram of data")
# Get numeric and categorical columns
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(df)
# Choose a numeric column to plot
selected_col = input(f"Choose a column from {numeric_cols}: ")
# Check that a valid column was selected
if selected_col in numeric_cols:
    # Plot histogram and display
    plt = plot_numeric(df, selected_col)
    plt.show()
""")
if barchart:
# st.info("Barchart for categorical data")
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
cat_selected_col = st.selectbox("Choose a column", categorical_cols, key = "bar_category")
if cat_selected_col:
plt = plot_categorical(st.session_state.df, cat_selected_col)
st.pyplot(plt)
save_image(plt, 'bar_chart.png')
with st.expander("Expand for Python|Streamlit Code"):
st.code("""
import matplotlib.pyplot as plt
import pandas as pd
# Function to get categorical and numerical columns from the dataframe
def get_categorical_and_numerical_cols(df):
numeric_cols = []
categorical_cols = []
for col in df.columns:
if df[col].dtype == 'object':
categorical_cols.append(col)
else:
numeric_cols.append(col)
return numeric_cols, categorical_cols
# Function to plot the categorical data
def plot_categorical(df, column):
plt.figure(figsize=(10, 6))
df[column].value_counts().plot(kind='bar')
plt.xlabel(column)
plt.ylabel('Count')
plt.title(f'Bar Chart for {column}')
plt.xticks(rotation=45)
plt.tight_layout()
return plt
# Load the dataframe and get the numeric and categorical columns
df = pd.read_csv("your_data.csv")
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(df)
# Select a column from the categorical columns
cat_selected_col = input(f"Choose a column from {categorical_cols}: ")
# Check if a column is selected
if cat_selected_col in categorical_cols:
# Plot the categorical data
plt = plot_categorical(df, cat_selected_col)
plt.show()
""")
if show_corr:
st.info("Correlation heatmap")
plt = plot_corr(st.session_state.df)
st.pyplot(plt)
save_image(plt, 'heatmap.png')
with st.expander("What is a correlation heatmap?"):
st.write("""A correlation heatmap is a graphical representation of the correlation matrix, which is a table showing correlation coefficients between sets of variables. Each cell in the table shows the correlation between two variables. In the heatmap, correlation coefficients are color-coded, where the intensity of the color represents the magnitude of the correlation coefficient.
In your demo dataset heatmap, red signifies a high positive correlation of 1.0, which means the variables move in the same direction. If one variable increases, the other variable also increases. Darker blue, at the other end, represents negative correlation (close to -0.06 in your case), meaning the variables move in opposite directions. If one variable increases, the other variable decreases.
The correlation values appear in each square, giving a precise numeric correlation coefficient along with the visualized color intensity.
**Why are correlation heatmaps useful?**
Correlation heatmaps are useful to determine the relationship between different variables. In the field of medicine, this can help identify risk factors for diseases, where variables could be different health indicators like age, cholesterol level, blood pressure, etc.
**Understanding correlation values:**
Correlation coefficients range from -1 to 1:
- A correlation of 1 means a perfect positive correlation.
- A correlation of -1 means a perfect negative correlation.
- A correlation of 0 means there is no linear relationship between the variables.
It's important to note that correlation doesn't imply causation. While a correlation can suggest a relationship between two variables, it doesn't mean that changes in one variable cause changes in another.
Also, remember that correlation heatmaps are based on linear relationships between variables. If variables have a non-linear relationship, the correlation coefficient may not capture their relationship accurately.
For medical students, think of correlation heatmaps as a quick way to visually identify relationships between multiple variables at once. This can help guide your understanding of which variables may be important to consider together in further analyses.""")
st.code("""import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Function to plot correlation heatmap
def plot_corr(df):
# Compute correlation matrix
corr = df.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
fig, ax = plt.subplots(figsize=(10, 8))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, vmin=-1, center=0,
square=True, linewidths=.5, annot=True, fmt=".2f", ax=ax)
# Set plot title
ax.set_title("Correlation Heatmap")
return plt
# Load the dataframe
df = pd.read_csv("your_data.csv")
# Call the plot_corr function and display the correlation heatmap
plt = plot_corr(df)
plt.show()
""")
if summary_cat:
st.info("Summary of categorical data")
summary = summarize_categorical(st.session_state.df)
st.write(summary)
st.session_state.df_to_download = summary
if st.session_state.df_to_download is not None:
format_cat = st.radio("Select the format for your report:", ('csv', 'json', 'html', ), key = 'format_cat', horizontal = True, )
df_download_options(st.session_state.df_to_download, 'categorical_summary', format_cat)
if piechart:
st.info("Pie chart for categorical data")
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
# cat_options =[]
# columns = list(df.columns)
# for col in columns:
# if df[col].dtype != np.float64 and df[col].dtype != np.int64:
# cat_options.append(col)
cat_selected_col = st.selectbox("Choose a column", categorical_cols, key = "pie_category")
if cat_selected_col:
plt = plot_pie(st.session_state.df, cat_selected_col)
st.pyplot(plt)
save_image(plt, 'pie_chart.png')
if check_preprocess:
# st.write("Running readiness assessment...")
readiness_summary = assess_data_readiness(st.session_state.df)
# st.write("Readiness assessment complete.")
# Display the readiness summary using Streamlit
# Display the readiness summary using Streamlit
st.subheader("Data Readiness Summary")
try:
if readiness_summary['data_empty']:
st.write("The DataFrame is empty.")
else:
# Combine column information and readiness summary into a single DataFrame
column_info_df = pd.DataFrame.from_dict(
readiness_summary['columns'],
orient='index',
columns=['Data Type']
)
summary_df = pd.DataFrame.from_dict(
readiness_summary['missing_values'],
orient='index',
columns=['Missing Values']
)
summary_df['Data Type'] = column_info_df['Data Type']
# Display the combined table
st.write(summary_df)
if readiness_summary['missing_columns']:
st.write("Missing Columns:")
st.write(readiness_summary['missing_columns'])
if readiness_summary['inconsistent_data_types']:
st.write("Inconsistent Data Types:")
st.write(readiness_summary['inconsistent_data_types'])
if readiness_summary['data_ready']:
st.success("The data is ready for analysis!")
else:
st.warning("The data is not fully ready for analysis.")
except:
st.write("The DataFrame is isn't yet ready for readiness assessment. :) ")
# st.info("Check if you need to preprocess data")
# missing_values, outliers, data_types, skewness, cardinality = analyze_dataframe(df)
# st.write("Missing values")
# st.write(missing_values)
# st.write("Outliers")
# st.write(outliers)
# st.write("Data types")
# st.write(data_types)
# st.write("Skewness")
# st.write(skewness)
# st.write("Cardinality")
# st.write(cardinality)
if show_scatter:
st.info("Scatterplot")
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
# Filter numeric columns
# numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
numeric_cols.sort() # sort the list of columns alphabetically
# Filter categorical columns
# categorical_cols = df.select_dtypes(include=[object]).columns.tolist()
categorical_cols.sort() # sort the list of columns alphabetically
# Dropdown to select columns to visualize
col1, col2 = st.columns(2)
with col1:
scatter_x = st.selectbox('Select column for x axis:', numeric_cols)
with col2:
scatter_y = st.selectbox('Select column for y axis:', numeric_cols, index=1)
# Use st.beta_expander to hide or expand filtering options
with st.expander('Filter Options'):
# Filter for the remaining numerical column
remaining_cols = [col for col in numeric_cols if col != scatter_x and col != scatter_y]
if remaining_cols:
filter_col = st.selectbox('Select a numerical column to filter data:', remaining_cols)
if filter_col:
min_val, max_val = float(st.session_state.df[filter_col].min()), float(st.session_state.df[filter_col].max())
if np.isnan(min_val) or np.isnan(max_val):
st.write(f"Cannot filter by {filter_col} because it contains NaN values.")
else:
filter_range = st.slider('Select a range to filter data:', min_val, max_val, (min_val, max_val))
st.session_state.df = st.session_state.df[(st.session_state.df[filter_col] >= filter_range[0]) & (st.session_state.df[filter_col] <= filter_range[1])]
# Filter for the remaining categorical column
if categorical_cols:
filter_cat_col = st.selectbox('Select a categorical column to filter data:', categorical_cols)
if filter_cat_col:
categories = st.session_state.df[filter_cat_col].unique().tolist()
selected_categories = st.multiselect('Select categories to include in the data:', categories, default=categories)
st.session_state.df = st.session_state.df[st.session_state.df[filter_cat_col].isin(selected_categories)]
# Check if DataFrame is empty before creating scatterplot
if st.session_state.df.empty:
st.write("The current filter settings result in an empty dataset. Please adjust the filter settings.")
else:
scatterplot = create_scatterplot(st.session_state.df, scatter_x, scatter_y)
save_image(scatterplot, 'custom_scatterplot.png')
if box_plot:
# Call the function to get the lists of numerical and categorical columns
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
# Filter numeric columns
# numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
numeric_cols.sort() # sort the list of columns
# Filter categorical columns
# categorical_cols = df.select_dtypes(include=[object]).columns.tolist()
categorical_cols.sort() # sort the list of columns
# Dropdown to select columns to visualize
numeric_col = st.selectbox('Select a numerical column:', numeric_cols, key = "box_numeric")
categorical_col = st.selectbox('Select a categorical column:', categorical_cols, key = "box_category")
mybox = create_boxplot(st.session_state.df, numeric_col, categorical_col, show_points=False)
save_image(mybox, 'box_plot.png')
with st.expander('What is a box plot?'):
st.write("""Box plots (also known as box-and-whisker plots) are a great way to visually represent the distribution of data. They're particularly useful when you want to compare distributions between several groups. For example, you might want to compare the distribution of patients' ages across different diagnostic categories.
(Check out age and diabetes in the sample dataset.)
**Components of a box plot:**
A box plot is composed of several parts:
1. **Box:** The main part of the plot, the box, represents the interquartile range (IQR), which is the range between the 25th percentile (Q1, the lower edge of the box) and the 75th percentile (Q3, the upper edge of the box). The IQR contains the middle 50% of the data points.
2. **Median:** The line (or sometimes a dot) inside the box represents the median of the data - the value separating the higher half from the lower half of a data sample. It's essentially the 50th percentile.
3. **Whiskers:** The lines extending from the box (known as whiskers) indicate variability outside the IQR. Typically, they extend to the most extreme data point within 1.5 times the IQR from the box.
4. **Outliers:** Points plotted beyond the whiskers are considered outliers - unusually high or low values in comparison with the rest of the data.
**What is the notch used for?**
The notch in a notched box plot represents the confidence interval around the median. If the notches of two box plots do not overlap, it's a strong indication (though not absolute proof) that the medians differ. This can be a useful way to visually compare medians across groups.
For medical students, a good way to think about box plots might be in comparison to lab results. Just as lab results typically give a reference range and flag values outside of that range, a box plot gives a visual representation of the range of the data (through the box and whiskers) and flags outliers.
The notch, meanwhile, is a bit like the statistical version of a normal range for the median. If a notch doesn't overlap with the notch from another box plot, it's a sign that the medians might be significantly different. But just like lab results, statistical tests are needed to definitively say whether a difference is significant.
""")
if violin_plot:
# Call the function to get the lists of numerical and categorical columns
numeric_cols, categorical_cols = get_categorical_and_numerical_cols(st.session_state.df)
# Filter numeric columns
# numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
numeric_cols.sort() # sort the list of columns
# Filter categorical columns
# categorical_cols = df.select_dtypes(include=[object]).columns.tolist()
categorical_cols.sort() # sort the list of columns
# Dropdown to select columns to visualize
numeric_col = st.selectbox('Select a numerical column:', numeric_cols, key = "violin_numeric")
categorical_col = st.selectbox('Select a categorical column:', categorical_cols, key = "violin_category")
violin = create_violinplot(st.session_state.df, numeric_col, categorical_col)
save_image(violin, 'violin_plot.png')
with st.expander('What is a violin plot?'):
st.write("""Violin plots are a great visualization tool for examining distributions of data and they combine features from box plots and kernel density plots.
1. **Overall Shape**: The violin plot is named for its resemblance to a violin. The shape of the "violin" provides a visual representation of the distribution of the data. The width of the "violin" at any given point represents the density or number of data points at that level. This means a wider section indicates more data points lie in that range, while a narrower section means fewer data points. This is similar to a histogram but it's smoothed out, which can make the distribution clearer.
2. **Dot in the Middle**: This dot often represents the median of the data. The median is the middle point of the data. That means half of all data points are below this value and half are above it. In medicine, the median is often a more useful measure than the mean because it's less affected by outliers or unusually high or low values. For example, if you're looking at the age of patients, a single 100-year-old patient won't dramatically shift the median like it would the mean.
3. **Thicker Bar in the Middle**: This is an interquartile range (IQR), which captures the middle 50% of the data (from the 25th to the 75th percentile). The IQR can help you understand the spread of the central half of your data. If the IQR is small, it means the central half of your data points are clustered closely around the median. If the IQR is large, it means they're more spread out.
4. **Usage**: Violin plots are particularly helpful when you want to visualize the distribution of a numerical variable across different categories. For example, you might want to compare the distribution of patient ages in different diagnostic categories.
Remember, like any statistical tool, violin plots provide a simplified representation of the data and may not capture all nuances. For example, they usually show a smoothed distribution, which might hide unusual characteristics or outliers in the data. It's always important to also consider other statistical tools and the clinical context of the data."""
)
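# --- Illustrative sketch (comments only, not executed): a violin plot with matplotlib ---
# A hedged example of the plot described above; the app's create_violinplot() may differ internally.
# Here df stands for st.session_state.df, and numeric_col / categorical_col are the selections above.
#
#   import matplotlib.pyplot as plt
#
#   groups = [g[numeric_col].dropna() for _, g in df.groupby(categorical_col)]
#   plt.violinplot(groups, showmedians=True)        # width ~ density, line ~ median
#   plt.xticks(range(1, len(groups) + 1), [name for name, _ in df.groupby(categorical_col)])
#   plt.ylabel(numeric_col)
#   plt.show()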
if view_full_df:
st.dataframe(st.session_state.df)
if show_table:
if st.session_state.df.shape[1] > 99:
st.warning(f'You have {st.session_state.df.shape[1]} columns. This would not look good in a publication. Fewer than 50 would be much better.')
else:
nunique = st.session_state.df.select_dtypes(include=['object', 'category']).nunique()
to_drop = nunique[nunique > 15].index
df_filtered = st.session_state.df.drop(to_drop, axis=1)
# Check if any numerical column is binary and add it to categorical list
numerical_columns = df_filtered.select_dtypes(include=[np.number]).columns.tolist()
for col in numerical_columns:
if df_filtered[col].nunique() == 2:
df_filtered[col] = df_filtered[col].astype(str)
categorical = df_filtered.select_dtypes(include=[object]).columns.tolist()
# Use Streamlit to create selection box for categorical variable
st.header("Table 1")
categorical_variable = st.selectbox('Select the categorical variable for grouping:',
options=categorical)
nonnormal_variables = st.multiselect("Select any non-normally distributed variables for rank-based analysis", df_filtered.columns.tolist())
# st.write(df_filtered.head())
table = generate_table(df_filtered, categorical_variable, nonnormal_variables)
# tablefmt = st.radio("Select a format for your table:", ["github", "grid", "fancy_grid", "pipe", "orgtbl", "jira", "presto", "psql", "rst", "mediawiki", "moinmoin", "youtrack", "html", "latex", "latex_raw", "latex_booktabs", "textile"])
# st.header("Table 1")
st.write(table.tabulate(tablefmt = "github"))
st.write("-------")
st.info("""Courtesy of TableOne: Tom J Pollard, Alistair E W Johnson, Jesse D Raffa, Roger G Mark;
tableone: An open source Python package for producing summary statistics
for research papers, JAMIA Open, Volume 1, Issue 1, 1 July 2018, Pages 26–31,
https://doi.org/10.1093/jamiaopen/ooy012""")
st.write("-------")
# Download button for Excel file
if st.checkbox("Click to Download Your Table 1"):
table_format = st.selectbox("Select a file format:", ["csv", "excel", "html", "latex"])
# Save DataFrame as Excel file
if table_format == "excel":
output_path = "./output/tableone_results.xlsx"
table.to_excel(output_path)
# Provide the download link
st.markdown(get_download_link(output_path, "xlsx"), unsafe_allow_html=True)
if table_format == "csv":
output_path = "./output/tableone_results.csv"
table.to_csv(output_path)
# Provide the download link
st.markdown(get_download_link(output_path, "csv"), unsafe_allow_html=True)
if table_format == "html":
output_path = "./output/tableone_results.html"
table.to_html(output_path)
# Provide the download link
st.markdown(get_download_link(output_path, "html"), unsafe_allow_html=True)
if table_format == "latex":
output_path = "./output/tableone_results.tex"
table.to_latex(output_path)
st.markdown(get_download_link(output_path, "tex"), unsafe_allow_html=True)
# Save DataFrame as Excel file
if perform_pca:
# Create PCA plot
pca_fig2 = perform_pca_plot(st.session_state.df)
save_image(pca_fig2, 'pca_plot.png')
scree_plot = create_scree_plot(st.session_state.df)
save_image(scree_plot, 'scree_plot.png')
with st.expander("What is PCA?"):
st.write("""Principal Component Analysis, or PCA, is a method used to highlight important information in datasets that have many variables and to bring out strong patterns in a dataset. It's a way of identifying underlying structure in data.
Here's an analogy that might make it more understandable: Imagine a swarm of bees flying around in a three-dimensional space: up/down, left/right, and forward/backward. These are our original variables. Now, imagine you want to take a picture of this swarm that captures as much information as possible, but your camera can only take pictures in two dimensions. You can rotate your camera in any direction, but once you take a picture, you'll lose the third dimension.
PCA helps us choose the best angle to take this picture. The first principal component (PC1) represents the best angle that captures the most variation in the swarm. The second principal component (PC2) is the best angle perpendicular to the first that captures the remaining variation, and so on. The idea is to minimize the information (variance) lost when we reduce dimensionality (like going from a 3D swarm to a 2D picture).
In a medical context, you might have data from thousands of genes or hundreds of physical and behavioral characteristics. Not all of these variables are independent, and many of them tend to change together. PCA allows us to represent the data in fewer dimensions that capture the most important variability in the dataset.
Each Principal Component represents a combination of original features (like genes or patient characteristics) and can often be interpreted in terms of those features. For example, a PC might represent a combination of patient's age, blood pressure, and cholesterol level. The coefficients of the features in the PC (the "loadings") tell us how much each feature contributes to that PC.
Finally, PCA can be particularly useful in visualizing high-dimensional data. By focusing on the first two or three principal components, we can create a scatterplot of our data, potentially highlighting clusters or outliers. However, remember that this visualization doesn't capture all the variability in the data—only the variability best captured by the first few principal components.""")
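# --- Illustrative sketch (comments only, not executed): PCA with scikit-learn ---
# A minimal example of the idea described above (standardize, project to 2 components,
# inspect explained variance); the app's perform_pca_plot()/create_scree_plot() may differ.
# df stands for st.session_state.df.
#
#   from sklearn.decomposition import PCA
#   from sklearn.preprocessing import StandardScaler
#
#   X = df.select_dtypes(include="number").dropna()
#   X_scaled = StandardScaler().fit_transform(X)
#   pca = PCA(n_components=2).fit(X_scaled)
#   X_2d = pca.transform(X_scaled)           # coordinates for a 2-D scatter plot
#   print(pca.explained_variance_ratio_)     # variance captured by PC1 and PC2
#   print(pca.components_)                   # loadings of each original feature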
with tab2:
st.info("""N.B. This merely shows a glimpse of what is possible. Any model shown is not yet optimized and requires ML and domain level expertise.
Yet, this is a good start to get a sense of what is possible."""
)
try:
x = st.session_state.df
except NameError:
st.warning("First upload a CSV file or choose a demo dataset from the **Data Exploration** tab")
else:
# Filter categorical columns and numerical bivariate columns
categorical_cols = st.session_state.df.select_dtypes(include=[object]).columns.tolist()
# Add bivariate numerical columns
numerical_bivariate_cols = [col for col in st.session_state.df.select_dtypes(include=['int64', 'float64']).columns
if st.session_state.df[col].nunique() == 2]
# Combine the two lists and sort them
categorical_cols = categorical_cols + numerical_bivariate_cols
categorical_cols.sort() # sort the list of columns
with st.expander("Click to see your current dataset"):
st.info("The first 5 rows:")
st.write(st.session_state.df.head())
st.subheader("""
Choose the Target Column
""")
target_col = st.selectbox('Select a categorical column as the target:', categorical_cols)
st.subheader("""
Set the Target Class Value to Predict
""")
try:
categories_to_predict = st.multiselect('Select one or more categories, but not all. You need two groups to predict, i.e., your target versus the rest:', st.session_state.df[target_col].unique().tolist(), key = "target_categories-ml")
# Preprocess the data and exclude the target column from preprocessing
df_processed, included_cols, excluded_cols = preprocess(st.session_state.df.drop(columns=[target_col]), target_col)
df_processed[target_col] = st.session_state.df[target_col] # Include the target column back into the dataframe
st.subheader("""
Select Features to Include in the Model
""")
st.info(f"Available Features for your Model: {included_cols}")
st.warning(f"Your Selected Target for Prediction: {target_col} = {categories_to_predict}")
all_features = st.checkbox("Select all features", value=False, key="select_all_features-10")
if all_features:
final_columns = included_cols
else:
final_columns = st.multiselect('Select features to include in your model:', included_cols, key = "columns_to_include-10")
if len(excluded_cols) > 0:
st.write(f"Unavailable columns for modeling: {excluded_cols}")
# Create binary target variable based on the selected categories
df_processed[target_col] = df_processed[target_col].apply(lambda x: 1 if x in categories_to_predict else 0)
X = df_processed[final_columns]
# st.write(X.head())
# Split the dataframe into data and labels
# List of available scaling options
scaling_options = {
"No Scaling": None,
"Standard Scaling": StandardScaler(),
"Min-Max Scaling": MinMaxScaler(),
}
# List of available normalization options
normalization_options = {
"No Normalization": None,
"L1 Normalization": "l1",
"L2 Normalization": "l2",
}
scaling_or_norm = st.checkbox("Scaling or Normalization?", value=False, key="scaling_or_norm-10")
# User selection for scaling option
if scaling_or_norm == True:
scaling_option = st.selectbox("Select Scaling Option", list(scaling_options.keys()))
# User selection for normalization option
normalization_option = st.selectbox("Select Normalization Option", list(normalization_options.keys()))
# Apply selected scaling and normalization options to the features
if scaling_option != "No Scaling":
scaler = scaling_options[scaling_option]
X = scaler.fit_transform(X)
if normalization_option != "No Normalization":
normalization_type = normalization_options[normalization_option]
X = normalize(X, norm=normalization_type)
# X = df_processed.drop(columns=[target_col])
# X = df_processed[final_columns]
y = df_processed[target_col]
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# pca_check = st.checkbox("PCA?", value=False, key="pca_check-10")
# if pca_check == True:
# n_neighbors = 3
# random_state = 0
# dim = len(X[0])
# n_classes = len(np.unique(y))
# # Reduce dimension to 2 with PCA
# pca = make_pipeline(StandardScaler(), PCA(n_components=2, random_state=random_state))
# # Reduce dimension to 2 with LinearDiscriminantAnalysis
# lda = make_pipeline(StandardScaler(), LinearDiscriminantAnalysis(n_components=2))
# # Reduce dimension to 2 with NeighborhoodComponentAnalysis
# nca = make_pipeline(
# StandardScaler(),
# NeighborhoodComponentsAnalysis(n_components=2, random_state=random_state),
# )
# # Use a nearest neighbor classifier to evaluate the methods
# knn = KNeighborsClassifier(n_neighbors=n_neighbors)
# # Make a list of the methods to be compared
# dim_reduction_methods = [("PCA", pca), ("LDA", lda), ("NCA", nca)]
# # plt.figure()
# for i, (name, model) in enumerate(dim_reduction_methods):
# plt.figure()
# # plt.subplot(1, 3, i + 1, aspect=1)
# # Fit the method's model
# model.fit(X_train, y_train)
# # Fit a nearest neighbor classifier on the embedded training set
# knn.fit(model.transform(X_train), y_train)
# # Compute the nearest neighbor accuracy on the embedded test set
# acc_knn = knn.score(model.transform(X_test), y_test)
# # Embed the data set in 2 dimensions using the fitted model
# X_embedded = model.transform(X)
# # Plot the projected points and show the evaluation score
# plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, s=30, cmap="Set1")
# plt.title(
# "{}, KNN (k={})\nTest accuracy = {:.2f}".format(name, n_neighbors, acc_knn)
# )
# fig = plt.show()
# st.pyplot(fig)
except:
st.warning("Please select a target column first or pick a dataset with a target column avaialble.")
st.subheader("""
Choose the Machine Learning Model
""")
model_option = st.selectbox(
"Which machine learning model would you like to use?",
("Logistic Regression", "Decision Tree", "Random Forest", "Gradient Boosting Machines (GBMs)", "Support Vector Machines (SVMs)", "Neural Network")
)
perform_shapley = st.checkbox("Include a Shapley Force Plot", value=False, key="perform_shapley-10")
if perform_shapley == True:
st.warning("Shapley interpretation of the model is computationally expensive for some models and may take a while to run. Please be patient")
if st.button("Predict"):
if model_option == "Logistic Regression":
model = LogisticRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is logistic regression?"):
st.write("""
Logistic regression is a statistical model commonly used in the field of medicine to predict binary outcomes - such as whether a patient has a disease (yes/no), whether a patient survived or not after a treatment (survived/did not survive), etc.
Logistic regression, like linear regression, establishes a relationship between the predictor variables (such as patient's age, weight, smoking history) and the target variable (e.g., presence or absence of a disease). However, unlike linear regression which predicts a continuous outcome, logistic regression predicts the probability of an event occurring, which is perfect for binary (two-category) outcomes.
Here's a simplified step-by-step breakdown:
1. **Collect and Prepare Your Data**: This involves gathering medical data that includes both the outcome (what you want to predict) and predictor variables (information you will use to make the prediction).
2. **Build the Model**: Logistic regression uses a mathematical formula that looks somewhat similar to the formula for a line in algebra (y = mx + b), but it's modified to predict probabilities. The formula takes your predictors and calculates the "log odds" of the event occurring.
3. **Interpret the Model**: The coefficients (the values that multiply the predictors) in the logistic regression model represent the change in the log odds of the outcome for a one-unit increase in the predictor variable. For example, if age is a predictor and its coefficient is 0.05, it means that for each one year increase in age, the log odds of the disease occurring (assuming all other factors remain constant) increase by 0.05. Because these are "log odds", the relationship between the predictors and the probability of the outcome isn't a straight line, but a curve that can't go below 0 or above 1.
4. **Make Predictions**: You can input a new patient's information into the logistic regression equation, and it will output the predicted probability of the outcome. For example, it might predict a patient has a 75% chance of having a disease. You can then convert this into a binary outcome by setting a threshold, such as saying any probability above 50% will be considered a "yes."
Remember that logistic regression, while powerful, makes several assumptions. It assumes a linear relationship between the log odds of the outcome and the predictor variables, that the observations are independent of one another, and that there's no multicollinearity (a high correlation among predictor variables). As with any model, it's also only as good as the data you feed into it.
In the medical field, logistic regression can be a helpful tool to predict outcomes and identify risk factors. However, it's important to understand its assumptions and limitations and to use clinical judgment alongside the model's predictions.""")
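# --- Worked example (comments only): interpreting a logistic regression coefficient ---
# The equation printed below reports log-odds coefficients. A hedged illustration of the usual
# interpretation: a coefficient of 0.05 for age means each extra year multiplies the odds of the
# outcome by exp(0.05) ~= 1.05, i.e. roughly a 5% increase in the odds, holding the other features
# constant. In code (assuming X_train is still a dataframe with named columns):
#
#   import numpy as np
#   odds_ratios = np.exp(model.coef_[0])   # one odds ratio per feature
#   print(dict(zip(X_train.columns, odds_ratios)))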
display_metrics(y_test, predictions, y_scores)
# After training the logistic regression model, assuming the model's name is "model"
coeff = model.coef_[0]
features = X_train.columns
equation = "Logit(P) = " + str(model.intercept_[0])
for c, feature in zip(coeff, features):
equation += " + " + str(c) + " * " + feature
st.write("The equation of the logistic regression model is:")
st.write(equation)
if perform_shapley == True: # shapley explanation
# Scale the features
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Analysis for the Shapley Force Plot..."):
# Standardize the features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# shapley explanation using KernelExplainer
explainer = shap.KernelExplainer(model.predict_proba, shap.sample(X_train_scaled, 100))
shap_values = explainer.shap_values(X_test_scaled)
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values[1][0]))[::-1]
sorted_shap_values = shap_values[1][0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value[1], sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
elif model_option == "Decision Tree":
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is a decision tree?"):
st.write("""
A decision tree is a type of predictive model that you can think of as similar to the flowcharts sometimes used in medical diagnosis. They're made up of nodes (decision points) and branches (choices or outcomes), and they aim to predict an outcome based on input data.
Here's how they work:
1. **Start at the root node**: This is the first decision that needs to be made and it's based on one of your input variables. For instance, in a medical context, this might be a question like, "Is the patient's temperature above 100.4 degrees Fahrenheit?"
2. **Follow the branch for your answer**: If the answer is "yes," follow the branch for "yes," and if it's "no," follow the branch for "no."
3. **Make the next decision**: Each branch leads to another node, where another decision will be made based on another variable. Maybe this time it's, "Does the patient have a cough?"
4. **Continue until you reach a leaf node**: Leaf nodes are nodes without any further branches. They represent the final decisions and are predictions of the outcome. In a binary outcome scenario, leaf nodes could represent "disease" or "no disease."
The decision tree "learns" from data by splitting the data at each node based on what would provide the most significant increase in information (i.e., the best separation of positive and negative cases). For instance, if patients with a certain disease often have a fever, the model might learn to split patients based on whether they have a fever.
While decision trees can be powerful and intuitive tools, there are a few caveats to keep in mind:
- **Overfitting**: If a tree is allowed to grow too deep (too many decision points), it may start to fit not just the underlying trends in the data, but also the random noise. This means it will perform well on the data it was trained on, but poorly on new data.
- **Instability**: Small changes in the data can result in a very different tree. This can be mitigated by using ensemble methods, which combine many trees together (like a random forest).
- **Simplicity**: Decision trees make very simple, linear cuts in the data. They can struggle with relationships in the data that are more complex.
Overall, decision trees can be an excellent tool for understanding and predicting binary outcomes from medical data. They can handle a mixture of data types, deal with missing data, and the results are interpretable and explainable. Just like with any medical test, though, the results should be interpreted with care and in the context of other information available."""
)
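# --- Illustrative sketch (comments only, not executed): inspecting the learned tree ---
# A hedged example of how the fitted DecisionTreeClassifier above could be made readable;
# assumes X_train is still a dataframe with named columns.
#
#   from sklearn.tree import export_text
#   rules = export_text(model, feature_names=list(X_train.columns))
#   print(rules)   # prints the if/else splits as indented text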
display_metrics(y_test, predictions, y_scores)
if perform_shapley == True: # shapley explanation
# Scale the features
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Analysis for the Shapley Force Plot..."):
# shapley explanation using TreeExplainer
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X_test)
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values[1][0]))[::-1]
sorted_shap_values = shap_values[1][0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value[1], sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
elif model_option == "Random Forest":
model = RandomForestClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is a random forest?"):
st.write("""
Random Forest is a type of machine learning model that is excellent for making predictions (both binary and multi-class) based on multiple input variables, which can be both categorical (like gender: male or female) and numerical (like age or blood pressure).
Imagine you have a patient and you have collected a lot of data about them - age, weight, cholesterol level, blood pressure, whether or not they smoke, etc. You want to predict a binary outcome: will they have a heart attack in the next 10 years or not?
A Random Forest works a bit like a team of doctors, each of whom asks a series of questions to make their own diagnosis (or prediction). These doctors are analogous to "decision trees" - the building blocks of a Random Forest.
Here's a simplified breakdown of how it works:
1. **Building Decision Trees**: Each "doctor" (or decision tree) in the Random Forest gets a random subset of patients' data. They ask questions like, "Is the patient's age over 60?", "Is their cholesterol level over 200?". Depending on the answers, they follow different paths down the tree, leading to a final prediction. The tree is constructed in a way that the most important questions (those that best split the patients according to the outcome) are asked first.
2. **Making Predictions**: To make a prediction for a new patient, each decision tree in the Random Forest independently makes a prediction. Essentially, each tree "votes" for the outcome it thinks is most likely (heart attack or no heart attack).
3. **Combining the Votes**: The Random Forest combines the votes from all decision trees. The outcome that gets the most votes is the Random Forest's final prediction. This is like asking a team of doctors for their opinions and going with the majority vote.
One of the main strengths of Random Forest is that it can handle complex data with many variables and it doesn't require a lot of data preprocessing (like scaling or normalizing data). Also, it is less prone to "overfitting" compared to individual decision trees. Overfitting is when a model learns the training data too well, to the point where it captures noise and performs poorly when predicting outcomes for new, unseen data.
However, it's important to note that while Random Forest often performs well, it can be somewhat of a "black box", meaning it can be hard to understand why it's making the predictions it's making. It's always crucial to validate the model's predictions against your medical knowledge and context."""
)
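# --- Illustrative sketch (comments only, not executed): random forest feature importances ---
# A hedged example of ranking which features the fitted RandomForestClassifier above relied on
# most; assumes X_train is still a dataframe with named columns.
#
#   import pandas as pd
#   importances = pd.Series(model.feature_importances_, index=X_train.columns)
#   print(importances.sort_values(ascending=False).head(10))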
display_metrics(y_test, predictions, y_scores)
if perform_shapley == True: # shapley explanation
# Scale the features
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Analysis for the Shapley Force Plot..."):
# shapley explanation using TreeExplainer
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X_test)
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values[1][0]))[::-1]
sorted_shap_values = shap_values[1][0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value[1], sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
elif model_option == "Gradient Boosting Machines (GBMs)":
model = GradientBoostingClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is a gradient boosting machine?"):
st.write("""Gradient Boosting Machines, like Random Forests, are a type of machine learning model that is good at making predictions based on multiple input variables. These variables can be both categorical (like patient sex: male or female) and numerical (like age, heart rate, etc.).
Again, suppose we're trying to predict a binary outcome: will this patient develop diabetes in the next five years or not?
A GBM also uses decision trees as its building blocks, but there's a crucial difference in how GBM combines these trees compared to Random Forests. Rather than having each tree independently make a prediction and then voting on the final outcome, GBMs build trees in sequence where each new tree is trying to correct the mistakes of the combined existing trees.
Here's a simplified breakdown of how it works:
1. **Building the First Tree**: A single decision tree is built to predict the outcome based on the input variables. However, this tree is usually very simple and doesn't do a great job at making accurate predictions.
2. **Building Subsequent Trees**: New trees are added to the model. Each new tree is constructed to correct the errors made by the existing set of trees. It does this by predicting the 'residual errors' of the previous ensemble of trees. In other words, it tries to predict how much the current model is 'off' for each patient.
3. **Combining the Trees**: The predictions from all trees are added together to make the final prediction. Each tree's contribution is 'weighted', so trees that do a better job at correcting errors have a bigger say in the final prediction.
GBMs are a very powerful method and often perform exceptionally well. Like Random Forests, they can handle complex data with many variables. But they also have a few additional strengths:
- GBMs can capture more complex patterns than Random Forests because they build trees sequentially, each learning from the last.
- GBMs can also give an estimate of the importance of each variable in making predictions, which can be very useful in understanding what's driving your predictions.
However, GBMs do have their challenges:
- They can be prone to overfitting if not properly tuned. Overfitting happens when your model is too complex and starts to capture noise in your data rather than the true underlying patterns.
- They can also be more computationally intensive than other methods, meaning they might take longer to train, especially with larger datasets.
Just like with any model, it's crucial to validate the model's predictions with your medical knowledge and consider the context. It's also important to remember that while GBMs can make very accurate predictions, they don't prove causation. They can identify relationships and patterns in your data, but they can't tell you why those patterns exist.""")
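# --- Illustrative sketch (comments only, not executed): watching a GBM correct itself stage by stage ---
# A hedged example of the "sequential correction" idea described above, using staged_predict()
# on the fitted GradientBoostingClassifier; model, X_test and y_test are the names from this script.
#
#   from sklearn.metrics import accuracy_score
#   for i, y_stage in enumerate(model.staged_predict(X_test), start=1):
#       if i % 25 == 0:
#           print(i, "trees ->", round(accuracy_score(y_test, y_stage), 3))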
display_metrics(y_test, predictions, y_scores)
if perform_shapley == True: # shapley explanation
# Scale the features
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Analysis for the Shapley Force Plot..."):
# shapley explanation
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X_test)
# Check if shap_values is a list (multi-class) or a single array (binary classification or regression)
if isinstance(shap_values, list):
shap_values_for_class = shap_values[1] # Assuming you're interested in the second class
else:
shap_values_for_class = shap_values
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values_for_class[0]))[::-1]
sorted_shap_values = shap_values_for_class[0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value, sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
elif model_option == "Support Vector Machines (SVMs)":
model = svm.SVC(probability=True)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is a support vector machine?"):
st.write("""
Support Vector Machines are a type of machine learning model that can be used for both regression and classification tasks. They can handle both numerical and categorical input variables. In the context of predicting a binary outcome in medical data - let's stick with the example of predicting whether a patient will develop diabetes or not in the next five years - an SVM is a classification tool.
Here's a simplified explanation:
1. **Building the Model**: The SVM algorithm tries to find a hyperplane, or a boundary, that best separates the different classes (in our case, 'will develop diabetes' and 'will not develop diabetes'). This boundary is chosen to be the one that maximizes the distance between the closest points (the "support vectors") in each class, which is why it's called a "Support Vector Machine".
2. **Making Predictions**: Once this boundary is established, new patients can be classified by where they fall in relation to this boundary. If a new patient's data places them on the 'will develop diabetes' side of the boundary, the SVM predicts they will develop diabetes.
Here are some strengths and challenges of SVMs:
Strengths:
- SVMs can model non-linear decision boundaries, and there are many kernels to choose from. This can make them more flexible in capturing complex patterns in the data compared to some other methods.
- They are also fairly robust against overfitting, especially in high-dimensional space.
Challenges:
- However, SVMs are not very easy to interpret compared to models like decision trees or logistic regression. The boundaries they produce can be complex and not easily explainable in terms of the input variables.
- SVMs can be inefficient to train with very large datasets, and they require careful preprocessing of the data and tuning of the parameters.
As with any machine learning model, while an SVM can make predictions about patient health, it's crucial to validate these predictions with medical expertise. Furthermore, an SVM can identify relationships in data, but it doesn't explain why these relationships exist. As always, correlation doesn't imply causation.""")
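# --- Illustrative sketch (comments only, not executed): choosing an SVM kernel ---
# A hedged example of the kernel/regularization choices mentioned above; the app itself uses the
# default RBF kernel, and probability=True enables predict_proba via internal calibration.
#
#   from sklearn import svm
#   model = svm.SVC(kernel="rbf", C=1.0, gamma="scale", probability=True)
#   # kernel="linear" gives a more interpretable, straight-line boundary;
#   # a larger C fits the training data more tightly (at the risk of overfitting).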
display_metrics(y_test, predictions, y_scores)
if perform_shapley == True:
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Analysis for the Shapley Force Plot..."):
# shapley explanation using KernelExplainer for SVM
explainer = shap.KernelExplainer(model.predict_proba, shap.sample(X_train, 100))
shap_values = explainer.shap_values(X_test)
# Check if shap_values is a list (multi-class) or a single array (binary classification or regression)
if isinstance(shap_values, list):
shap_values_for_class = shap_values[1] # Assuming you're interested in the second class
else:
shap_values_for_class = shap_values
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values_for_class[0]))[::-1]
sorted_shap_values = shap_values_for_class[0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value[1], sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
elif model_option == "Neural Network":
model = MLPClassifier(hidden_layer_sizes=(100,), activation='relu')
model.fit(X_train, y_train)
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
y_scores = model.predict_proba(X_test)[:, 1]
with st.expander("What is a neural network?"):
st.write("""
A neural network is a type of machine learning model inspired by the structure and function of the human brain's neural network. It is excellent for solving complex problems and making predictions based on historical data.
Just like the human brain consists of interconnected neurons, a neural network consists of interconnected artificial units called "nodes" or "neurons". These neurons are organized in layers - an input layer, one or more hidden layers, and an output layer. Each neuron takes input from the previous layer, performs a mathematical operation on the input, and passes the result to the next layer.
Here's a simplified breakdown of how a neural network works:
1. **Feedforward**: The input layer receives the input data, which can be numerical or categorical variables. Each neuron in the hidden layers and the output layer performs a weighted sum of the inputs, applies an activation function, and passes the result to the next layer. This process is called feedforward.
2. **Activation Function**: The activation function introduces non-linearity to the neural network, allowing it to learn and model complex relationships in the data. Common activation functions include sigmoid, tanh, and ReLU.
3. **Backpropagation**: After the feedforward process, the neural network compares its predictions to the actual values and calculates the prediction error. It then adjusts the weights and biases of the neurons in the network through a process called backpropagation. This iterative process continues until the neural network reaches a satisfactory level of accuracy.
Neural networks can be used for a wide range of tasks, including regression, classification, and even more complex tasks like image and speech recognition. They have been successfully applied in various domains, including medicine, finance, and natural language processing.
However, it's important to note that neural networks are computationally intensive and require a large amount of training data to generalize well. Additionally, hyperparameter tuning and regularization techniques may be necessary to prevent overfitting and improve performance.
"""
)
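# --- Illustrative sketch (comments only, not executed): a slightly more robust MLP configuration ---
# A hedged variant of the MLPClassifier used above, showing common knobs (an extra hidden layer,
# early stopping); the values are illustrative, not tuned.
#
#   from sklearn.neural_network import MLPClassifier
#   model = MLPClassifier(hidden_layer_sizes=(100, 50), activation="relu",
#                         early_stopping=True, max_iter=500, random_state=42)
#   model.fit(X_train, y_train)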
display_metrics(y_test, predictions, y_scores)
if perform_shapley == True:
with st.expander("What is a Shapley Force Plot?"):
st.markdown(shapley_explanation)
with st.spinner("Performing Shapley Analysis..."):
# shapley explanation using KernelExplainer for MLP
explainer = shap.KernelExplainer(model.predict_proba, shap.sample(X_train, 100))
shap_values = explainer.shap_values(X_test)
# Check if shap_values is a list (multi-class) or a single array (binary classification or regression)
if isinstance(shap_values, list):
shap_values_for_class = shap_values[1] # Assuming you're interested in the second class
else:
shap_values_for_class = shap_values
# Sort features by absolute contribution for the first instance in the test set
sorted_indices = np.argsort(np.abs(shap_values_for_class[0]))[::-1]
sorted_shap_values = shap_values_for_class[0][sorted_indices]
sorted_feature_names = X_test.columns[sorted_indices]
# Create a DataFrame to display sorted features and their shapley values
sorted_features_df = pd.DataFrame({
'Feature': sorted_feature_names,
'Shapley_Value': sorted_shap_values
})
# Display the sorted features DataFrame in Streamlit
st.table(sorted_features_df)
# Generate and display the sorted force plot
shap_html = shap.force_plot(explainer.expected_value[1], sorted_shap_values, sorted_feature_names, show=False)
shap.save_html("sorted_shap_plot.html", shap_html)
with open("sorted_shap_plot.html", "r") as f:
st.components.v1.html(f.read(), height=500)
| [
"Hello world",
"Columns: PLACEHOLDER\nNumber of rows: PLACEHOLDER",
"Hi! Ask me anything about data science and I'll try to answer it.",
"You are a medical data expert whose purpose is to generate realistic medical data to populate a dataframe. Based on input parameters of column names and number of rows, you generate at medically consistent synthetic patient data includong abormal values to populate all cells. \n10-20% of values should be above or below the normal range appropriate for each column name, but still physiologically possible. For example, SBP could range from 90 to 190. Creatinine might go from 0.5 to 7.0. Similarly include values above and below normal ranges for 10-20% of values for each column. Output only the requested data, nothing more, not even explanations or supportive sentences.\nIf you do not know what kind of data to generate for a column, rename column using the provided name followed by \"-ambiguous\". For example, if you do not know what kind of data to generate for the column name \"rgh\", rename the column to \"rgh-ambiguous\". \nPopululate ambiguous columns with randomly selected 1 or 0 values. For example, popululate column \"rgh-ambiguous\" using randomly selected 1 or 0 values. For diagnoses provided\nas column headers, e.g., \"diabetes\", populate with randomly selected yes or no values. Populate all cells with appropriate values. No missing values.\nAs a critical step review each row to ensure that the data is medically consistent, e.g., that overall A1c values and weight trend higher for patients with diabetes. If not, regenerate the row or rows.\n\nReturn only data, nothing more, not even explanations or supportive sentences. Generate the requested data so it can be processed by the following code into a dataframe:\n\n```\n\n # Use StringIO to convert the string data into file-like object\n data = io.StringIO(response.choices[0].message.content)\n\n # Read the data into a DataFrame, skipping the first row\n df = pd.read_csv(data, sep=\",\", skiprows=1, header=None, names=columns)\n\n```\n\nYour input parameters will be in this format\n\nColumns: ```columns```\nNumber of rows: ```number```\n \n "
] |
2024-01-10 | adarshmusa/Conversational-Fashion-Outfit-Generator_FK-GRiD | st_chat_app.py | import streamlit as st
import os
import openai
os.environ['OPENAI_API_KEY'] = "" # enter OpenAI key here
openai.api_key = "" # enter OpenAI key here
# Setup LLM
from llama_index import LLMPredictor, GPTVectorStoreIndex, PromptHelper
from langchain.chat_models import ChatOpenAI
from llama_index import SimpleDirectoryReader
from llama_index import GPTVectorStoreIndex
import json
documents = SimpleDirectoryReader('./fashion_data').load_data()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo"))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
#prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) #new 2 line code below
num_output = 4096
prompt_helper = PromptHelper(max_input_size, num_output, chunk_overlap_ratio= 0.2, chunk_size_limit=1024)
custom_LLM_index = GPTVectorStoreIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
# define Chatbot as a class
class Chatbot:
def __init__(self, api_key, index):
self.index = index
openai.api_key = api_key
self.chat_history = [{"role": "system",
"content": "You are a friendly fashion outfit recommendation assistant which recommends outfits after taking information regarding age, gender, body shape, occasion and location. Update outfits according to the user's preferences and opinions accordingly. Also, it should collect user name and information and provide them with the user name and information when asked."}]
def generate_response(self, user_input):
# Note: the assembled history prompt below is not passed to the query engine;
# only the raw user input is queried.
prompt = "\n".join([f"{message['role']}: {message['content']}" for message in self.chat_history])
prompt += f"\nUser: {user_input}"
query_engine = custom_LLM_index.as_query_engine()
response = query_engine.query(user_input)
message = {"role": "assistant", "content": response.response}
self.chat_history.append({"role": "user", "content": user_input})
self.chat_history.append(message)
return message
def load_chat_history(self, filename):
try:
with open(filename, 'r') as f:
self.chat_history = json.load(f)
except FileNotFoundError:
pass
def save_chat_history(self, filename):
with open(filename, 'w') as f:
json.dump(self.chat_history, f)
bot = Chatbot(openai.api_key, index=custom_LLM_index)
bot.load_chat_history("chat_history.json")
# Terminal/Command-Line Based Chatbot Implementation
# while True:
# user_input = input("You: ")
# if user_input.lower() in ["bye", "goodbye"]:
# print("Bot: Goodbye!")
# bot.save_chat_history("chat_history.json")
# break
# response = bot.generate_response(user_input)
# print(f"Bot: {response['content']}")
# GUI using streamlit
import streamlit as st
import random
import time
from PIL import Image
import requests
from io import BytesIO
from st_clickable_images import clickable_images
from bs4 import BeautifulSoup
def imglink(word):
url = f"https://www.google.com/search?q={word}&tbm=isch" # the URL of the search result page
response = requests.get(url) # make a GET request to the URL
soup = BeautifulSoup(response.text, "html.parser") # parse the HTML content with BeautifulSoup
# find the first image link by searching for the appropriate tag and attribute
img_tag = soup.find("img", {"class": "yWs4tf"})
if img_tag is not None:
img_link = img_tag.get("src")
print(img_link) # print the first image link
return img_link
else:
print("No image found on the page.")
# GUI created using streamlit
st.title("👘 TrendAI - Your Fashion Expert")
st.subheader("Welcome to TrendAI, your go-to fashion assistant to help you get trendy outfits catered to your unique preferences!\nMade with ❤️ by Ashutosh and Adarsh.")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Accept user input
prompt = st.chat_input("Suggest an outfit for a 20 year old woman for a picnic.")
if prompt:
if prompt.lower() not in ["bye", "goodbye"]:
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Display user message in chat message container
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response in chat message container
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
response = bot.generate_response(prompt)
assistant_response = response['content']
# Simulate stream of response with milliseconds delay
for chunk in assistant_response.split():
full_response += chunk + " "
time.sleep(0.05)
# Add a blinking cursor to simulate typing
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# prompt design for outfit-component extraction for displaying respective components
prompt_prefix = "Extract the fashion product names along with their attributes, if any, from the content given and provide them in a single line separated by comma. Otherwise return the string \"nothing\" ."
prompt_with_input = prompt_prefix + response['content']
response = bot.generate_response(prompt_with_input)
print(f"Bot: {response['content']}")
outfit_components = response['content'].split(',') # Split outfit components
if outfit_components and outfit_components[0].strip().lower() != "nothing":  # only fetch images when components were actually extracted
images_list=[]
for component in outfit_components:
# Call the imglink function here for each component
img_link = imglink(component)
if img_link: images_list.insert(0, img_link)  # skip components for which no image was found
# clicked = clickable_images(
# images_list,
# titles=[f"Image #{str(i)}" for i in range(len(images_list))],
# div_style={"display": "flex", "justify-content": "center", "flex-wrap": "wrap"},
# img_style={"margin": "5px", "height": "200px"},
# )
# container_style = (
# "display: flex; justify-content: center; align-items: center;"
# "margin: 20px; height: 300px;"
# )
# for i in range(len(images_list)):
# print(images_list[i])
#for i in range(len(images_list)):
# st.image(images_list[i], caption=outfit_components[i], use_column_width="never", clamp=True)
clicked = clickable_images(images_list, titles=[f"Image #{str(i)}" for i in range(len(images_list))],
div_style={"display": "flex", "justify-content": "center",
"flex-wrap": "wrap"},
img_style={"margin": "5px", "height": "200px"},
)
#st.markdown(f"Image #{clicked} clicked" if clicked > -1 else "No image clicked")
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response})
else:
# Display assistant response in chat message container
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
bot.save_chat_history("chat_history.json")
message_placeholder = st.empty()
full_response = ""
assistant_response = "Goodbye!"
# Simulate stream of response with milliseconds delay
for chunk in assistant_response.split():
full_response += chunk + " "
time.sleep(0.05)
# Add a blinking cursor to simulate typing
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"\n",
"center",
"Extract the fashion product names along with their attributes, if any, from the content given and provide them in a single line seperated by comma. Otherwise return the string \"nothing\" .",
"PLACEHOLDERPLACEHOLDER",
"You are a friendly fashion outfit recommendation assistant which recommends outfits after taking information regarding age, gender, body shape, occasion and location. Update outfits according to the user's preferences and opinions accordingly. Also, it should collect user name and information and provide them with the user name and information when asked.",
"PLACEHOLDER: PLACEHOLDER",
"content",
"\nUser: PLACEHOLDER",
"Suggest an outfit for a 20 year old woman for a picnic."
] |
2024-01-10 | flyingPig2/jupyter-ai | packages~jupyter-ai-magics~jupyter_ai_magics~magics.py | import base64
import json
import keyword
import os
import re
import sys
import warnings
from typing import Optional
import click
from IPython import get_ipython
from IPython.core.magic import Magics, line_cell_magic, magics_class
from IPython.display import HTML, JSON, Markdown, Math
from jupyter_ai_magics.utils import decompose_model_id, get_lm_providers
from langchain.chains import LLMChain
from .parsers import (
CellArgs,
DeleteArgs,
ErrorArgs,
HelpArgs,
ListArgs,
RegisterArgs,
UpdateArgs,
cell_magic_parser,
line_magic_parser,
)
from .providers import BaseProvider
MODEL_ID_ALIASES = {
"gpt2": "huggingface_hub:gpt2",
"gpt3": "openai:text-davinci-003",
"chatgpt": "openai-chat:gpt-3.5-turbo",
"gpt4": "openai-chat:gpt-4",
}
class TextOrMarkdown:
def __init__(self, text, markdown):
self.text = text
self.markdown = markdown
def _repr_mimebundle_(self, include=None, exclude=None):
return {"text/plain": self.text, "text/markdown": self.markdown}
class TextWithMetadata:
def __init__(self, text, metadata):
self.text = text
self.metadata = metadata
def __str__(self):
return self.text
def _repr_mimebundle_(self, include=None, exclude=None):
return ({"text/plain": self.text}, self.metadata)
class Base64Image:
def __init__(self, mimeData, metadata):
mimeDataParts = mimeData.split(",")
self.data = base64.b64decode(mimeDataParts[1])
self.mimeType = re.sub(r";base64$", "", mimeDataParts[0])
self.metadata = metadata
def _repr_mimebundle_(self, include=None, exclude=None):
return ({self.mimeType: self.data}, self.metadata)
DISPLAYS_BY_FORMAT = {
"code": None,
"html": HTML,
"image": Base64Image,
"markdown": Markdown,
"math": Math,
"md": Markdown,
"json": JSON,
"text": TextWithMetadata,
}
NA_MESSAGE = '<abbr title="Not applicable">N/A</abbr>'
PROVIDER_NO_MODELS = "This provider does not define a list of models."
CANNOT_DETERMINE_MODEL_TEXT = """Cannot determine model provider from model ID '{0}'.
To see a list of models you can use, run '%ai list'"""
CANNOT_DETERMINE_MODEL_MARKDOWN = """Cannot determine model provider from model ID `{0}`.
To see a list of models you can use, run `%ai list`"""
AI_COMMANDS = {"delete", "error", "help", "list", "register", "update"}
class FormatDict(dict):
"""Subclass of dict to be passed to str#format(). Suppresses KeyError and
leaves replacement field unchanged if replacement field is not associated
with a value."""
def __missing__(self, key):
return key.join("{}")
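# Minimal usage sketch (illustrative only): because __missing__ hands the key back
# wrapped in braces, interpolation leaves unknown replacement fields untouched
# instead of raising KeyError.
#
#   "{prompt} in {language}".format_map(FormatDict({"language": "Python"}))
#   # -> '{prompt} in Python'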
class EnvironmentError(BaseException):
pass
class CellMagicError(BaseException):
pass
@magics_class
class AiMagics(Magics):
def __init__(self, shell):
super().__init__(shell)
self.transcript_openai = []
# suppress warning when using old OpenAIChat provider
warnings.filterwarnings(
"ignore",
message="You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`",
)
self.providers = get_lm_providers()
# initialize a registry of custom model/chain names
self.custom_model_registry = MODEL_ID_ALIASES
def _ai_bulleted_list_models_for_provider(self, provider_id, Provider):
output = ""
if len(Provider.models) == 1 and Provider.models[0] == "*":
if Provider.help is None:
output += f"* {PROVIDER_NO_MODELS}\n"
else:
output += f"* {Provider.help}\n"
else:
for model_id in Provider.models:
output += f"* {provider_id}:{model_id}\n"
output += "\n" # End of bulleted list
return output
def _ai_inline_list_models_for_provider(self, provider_id, Provider):
output = ""
if len(Provider.models) == 1 and Provider.models[0] == "*":
if Provider.help is None:
return PROVIDER_NO_MODELS
else:
return Provider.help
for model_id in Provider.models:
output += f", `{provider_id}:{model_id}`"
# Remove initial comma
return re.sub(r"^, ", "", output)
# Is the required environment variable set?
def _ai_env_status_for_provider_markdown(self, provider_id):
na_message = "Not applicable. | " + NA_MESSAGE
if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return na_message # No emoji
try:
env_var = self.providers[provider_id].auth_strategy.name
except AttributeError: # No "name" attribute
return na_message
output = f"`{env_var}` | "
if os.getenv(env_var) == None:
output += (
'<abbr title="You have not set this environment variable, '
+ "so you cannot use this provider's models.\">❌</abbr>"
)
else:
output += (
'<abbr title="You have set this environment variable, '
+ "so you can use this provider's models.\">✅</abbr>"
)
return output
def _ai_env_status_for_provider_text(self, provider_id):
if (
provider_id not in self.providers
or self.providers[provider_id].auth_strategy == None
):
return "" # No message necessary
try:
env_var = self.providers[provider_id].auth_strategy.name
except AttributeError: # No "name" attribute
return ""
output = f"Requires environment variable {env_var} "
if os.getenv(env_var) != None:
output += "(set)"
else:
output += "(not set)"
return output + "\n"
# Is this a name of a Python variable that can be called as a LangChain chain?
def _is_langchain_chain(self, name):
# Reserved word in Python?
if keyword.iskeyword(name):
return False
acceptable_name = re.compile("^[a-zA-Z0-9_]+$")
if not acceptable_name.match(name):
return False
ipython = get_ipython()
return name in ipython.user_ns and isinstance(ipython.user_ns[name], LLMChain)
# Is this an acceptable name for an alias?
def _validate_name(self, register_name):
# A registry name contains ASCII letters, numbers, hyphens, underscores,
# and periods. No other characters, including a colon, are permitted
acceptable_name = re.compile("^[a-zA-Z0-9._-]+$")
if not acceptable_name.match(register_name):
raise ValueError(
"A registry name may contain ASCII letters, numbers, hyphens, underscores, "
+ "and periods. No other characters, including a colon, are permitted"
)
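# Illustrative examples: "my-model_v1.2" passes this check, while "openai:gpt-4"
# is rejected, because the colon is reserved for the PROVIDER_ID:MODEL_NAME
# syntax used when specifying a target.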
# Initially set or update an alias to a target
def _safely_set_target(self, register_name, target):
# If target is a string, treat this as an alias to another model.
if self._is_langchain_chain(target):
ip = get_ipython()
self.custom_model_registry[register_name] = ip.user_ns[target]
else:
# Ensure that the destination is properly formatted
if ":" not in target:
raise ValueError(
"Target model must be an LLMChain object or a model name in PROVIDER_ID:MODEL_NAME format"
)
self.custom_model_registry[register_name] = target
def handle_delete(self, args: DeleteArgs):
if args.name in AI_COMMANDS:
raise ValueError(
f"Reserved command names, including {args.name}, cannot be deleted"
)
if args.name not in self.custom_model_registry:
raise ValueError(f"There is no alias called {args.name}")
del self.custom_model_registry[args.name]
output = f"Deleted alias `{args.name}`"
return TextOrMarkdown(output, output)
def handle_register(self, args: RegisterArgs):
# Existing command names are not allowed
if args.name in AI_COMMANDS:
raise ValueError(f"The name {args.name} is reserved for a command")
# Existing registered names are not allowed
if args.name in self.custom_model_registry:
raise ValueError(
f"The name {args.name} is already associated with a custom model; "
+ "use %ai update to change its target"
)
# Does the new name match expected format?
self._validate_name(args.name)
self._safely_set_target(args.name, args.target)
output = f"Registered new alias `{args.name}`"
return TextOrMarkdown(output, output)
def handle_update(self, args: UpdateArgs):
if args.name in AI_COMMANDS:
raise ValueError(
f"Reserved command names, including {args.name}, cannot be updated"
)
if args.name not in self.custom_model_registry:
raise ValueError(f"There is no alias called {args.name}")
self._safely_set_target(args.name, args.target)
output = f"Updated target of alias `{args.name}`"
return TextOrMarkdown(output, output)
def _ai_list_command_markdown(self, single_provider=None):
output = (
"| Provider | Environment variable | Set? | Models |\n"
+ "|----------|----------------------|------|--------|\n"
)
if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID `{single_provider}`."
for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue
output += (
f"| `{provider_id}` | "
+ self._ai_env_status_for_provider_markdown(provider_id)
+ " | "
+ self._ai_inline_list_models_for_provider(provider_id, Provider)
+ " |\n"
)
# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += (
"\nAliases and custom commands:\n\n"
+ "| Name | Target |\n"
+ "|------|--------|\n"
)
for key, value in self.custom_model_registry.items():
output += f"| `{key}` | "
if isinstance(value, str):
output += f"`{value}`"
else:
output += "*custom chain*"
output += " |\n"
return output
def _ai_list_command_text(self, single_provider=None):
output = ""
if single_provider is not None and single_provider not in self.providers:
return f"There is no model provider with ID '{single_provider}'."
for provider_id, Provider in self.providers.items():
if single_provider is not None and provider_id != single_provider:
continue
output += (
f"{provider_id}\n"
+ self._ai_env_status_for_provider_text(
provider_id
) # includes \n if nonblank
+ self._ai_bulleted_list_models_for_provider(provider_id, Provider)
)
# Also list aliases.
if single_provider is None and len(self.custom_model_registry) > 0:
output += "\nAliases and custom commands:\n"
for key, value in self.custom_model_registry.items():
output += f"{key} - "
if isinstance(value, str):
output += value
else:
output += "custom chain"
output += "\n"
return output
def handle_error(self, args: ErrorArgs):
no_errors = "There have been no errors since the kernel started."
# Find the most recent error.
ip = get_ipython()
if "Err" not in ip.user_ns:
return TextOrMarkdown(no_errors, no_errors)
err = ip.user_ns["Err"]
# Start from the previous execution count
excount = ip.execution_count - 1
last_error = None
while excount >= 0 and last_error is None:
if excount in err:
last_error = err[excount]
else:
excount = excount - 1
if last_error is None:
return TextOrMarkdown(no_errors, no_errors)
prompt = f"Explain the following error:\n\n{last_error}"
# Set CellArgs based on ErrorArgs
cell_args = CellArgs(
type="root", model_id=args.model_id, format=args.format, reset=False
)
return self.run_ai_cell(cell_args, prompt)
def _append_exchange_openai(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
self.transcript_openai.append({"role": "user", "content": prompt})
self.transcript_openai.append({"role": "assistant", "content": output})
def _decompose_model_id(self, model_id: str):
"""Breaks down a model ID into a two-tuple (provider_id, local_model_id). Returns (None, None) if indeterminate."""
if model_id in self.custom_model_registry:
model_id = self.custom_model_registry[model_id]
return decompose_model_id(model_id, self.providers)
def _get_provider(self, provider_id: Optional[str]) -> BaseProvider:
"""Returns the model provider ID and class for a model ID. Returns None if indeterminate."""
if provider_id is None or provider_id not in self.providers:
return None
return self.providers[provider_id]
def display_output(self, output, display_format, md):
# build output display
DisplayClass = DISPLAYS_BY_FORMAT[display_format]
# if the user wants code, add another cell with the output.
if display_format == "code":
# Strip a leading language indicator and trailing triple-backticks
lang_indicator = r"^```[a-zA-Z0-9]*\n"
output = re.sub(lang_indicator, "", output)
output = re.sub(r"\n```$", "", output)
new_cell_payload = dict(
source="set_next_input",
text=output,
replace=False,
)
ip = get_ipython()
ip.payload_manager.write_payload(new_cell_payload)
return HTML(
"AI generated code inserted below ⬇️", metadata=md
)
if DisplayClass is None:
return output
if display_format == "json":
# JSON display expects a dict, not a JSON string
output = json.loads(output)
output_display = DisplayClass(output, metadata=md)
# finally, display output display
return output_display
def handle_help(self, _: HelpArgs):
# The line parser's help function prints both cell and line help
with click.Context(line_magic_parser, info_name="%ai") as ctx:
click.echo(line_magic_parser.get_help(ctx))
def handle_list(self, args: ListArgs):
return TextOrMarkdown(
self._ai_list_command_text(args.provider_id),
self._ai_list_command_markdown(args.provider_id),
)
def run_ai_cell(self, args: CellArgs, prompt: str):
provider_id, local_model_id = self._decompose_model_id(args.model_id)
Provider = self._get_provider(provider_id)
if Provider is None:
return TextOrMarkdown(
CANNOT_DETERMINE_MODEL_TEXT.format(args.model_id)
+ "\n\n"
+ "If you were trying to run a command, run '%ai help' to see a list of commands.",
CANNOT_DETERMINE_MODEL_MARKDOWN.format(args.model_id)
+ "\n\n"
+ "If you were trying to run a command, run `%ai help` to see a list of commands.",
)
# if `--reset` is specified, reset transcript and return early
if provider_id == "openai-chat" and args.reset:
self.transcript_openai = []
return
# Determine provider and local model IDs
# If this is a custom chain, send the message to the custom chain.
if args.model_id in self.custom_model_registry and isinstance(
self.custom_model_registry[args.model_id], LLMChain
):
return self.display_output(
self.custom_model_registry[args.model_id].run(prompt),
args.format,
{"jupyter_ai": {"custom_chain_id": args.model_id}},
)
# validate presence of authn credentials
auth_strategy = self.providers[provider_id].auth_strategy
if auth_strategy:
# TODO: handle auth strategies besides EnvAuthStrategy
if auth_strategy.type == "env" and auth_strategy.name not in os.environ:
raise OSError(
f"Authentication environment variable {auth_strategy.name} not provided.\n"
f"An authentication token is required to use models from the {Provider.name} provider.\n"
f"Please specify it via `%env {auth_strategy.name}=token`. "
) from None
# configure and instantiate provider
provider_params = {"model_id": local_model_id}
if provider_id == "openai-chat":
provider_params["prefix_messages"] = self.transcript_openai
# for SageMaker, validate that required params are specified
if provider_id == "sagemaker-endpoint":
if (
args.region_name is None
or args.request_schema is None
or args.response_path is None
):
raise ValueError(
"When using the sagemaker-endpoint provider, you must specify all of "
+ "the --region-name, --request-schema, and --response-path options."
)
provider_params["region_name"] = args.region_name
provider_params["request_schema"] = args.request_schema
provider_params["response_path"] = args.response_path
# Validate that the request schema is well-formed JSON
try:
json.loads(args.request_schema)
except json.JSONDecodeError as e:
raise ValueError(
"request-schema must be valid JSON. "
f"Error at line {e.lineno}, column {e.colno}: {e.msg}"
) from None
provider = Provider(**provider_params)
# Apply a prompt template.
prompt = provider.get_prompt_template(args.format).format(prompt=prompt)
# interpolate user namespace into prompt
ip = get_ipython()
prompt = prompt.format_map(FormatDict(ip.user_ns))
# generate output from model via provider
result = provider.generate([prompt])
output = result.generations[0][0].text
# if openai-chat, append exchange to transcript
if provider_id == "openai-chat":
self._append_exchange_openai(prompt, output)
md = {"jupyter_ai": {"provider_id": provider_id, "model_id": local_model_id}}
return self.display_output(output, args.format, md)
@line_cell_magic
def ai(self, line, cell=None):
raw_args = line.split(" ")
if cell:
args = cell_magic_parser(raw_args, prog_name="%%ai", standalone_mode=False)
else:
args = line_magic_parser(raw_args, prog_name="%ai", standalone_mode=False)
if args == 0:
# this happens when `--help` is called on the root command, in which
# case we want to exit early.
return
# If a value error occurs, don't print the full stacktrace
try:
if args.type == "error":
return self.handle_error(args)
if args.type == "help":
return self.handle_help(args)
if args.type == "list":
return self.handle_list(args)
if args.type == "register":
return self.handle_register(args)
if args.type == "delete":
return self.handle_delete(args)
if args.type == "update":
return self.handle_update(args)
except ValueError as e:
print(e, file=sys.stderr)
return
# hint to the IDE that this object must be of type `RootArgs`
args: CellArgs = args
if not cell:
raise CellMagicError(
"""[0.8+]: To invoke a language model, you must use the `%%ai`
cell magic. The `%ai` line magic is only for use with
subcommands."""
)
prompt = cell.strip()
# interpolate user namespace into prompt
ip = get_ipython()
prompt = prompt.format_map(FormatDict(ip.user_ns))
return self.run_ai_cell(args, prompt)
| [
"Explain the following error:\n\nPLACEHOLDER"
] |
2024-01-10 | flyingPig2/jupyter-ai | packages~jupyter-ai-magics~jupyter_ai_magics~providers.py | import asyncio
import base64
import copy
import functools
import io
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Any, ClassVar, Coroutine, Dict, List, Literal, Optional, Union
from jsonpath_ng import parse
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.llms import (
AI21,
Anthropic,
Bedrock,
Cohere,
HuggingFaceHub,
OpenAI,
OpenAIChat,
SagemakerEndpoint,
)
from langchain.llms.sagemaker_endpoint import LLMContentHandler
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
from pydantic import BaseModel, Extra, root_validator
class EnvAuthStrategy(BaseModel):
"""Require one auth token via an environment variable."""
type: Literal["env"] = "env"
name: str
class MultiEnvAuthStrategy(BaseModel):
"""Require multiple auth tokens via multiple environment variables."""
type: Literal["file"] = "file"
names: List[str]
class AwsAuthStrategy(BaseModel):
"""Require AWS authentication via Boto3"""
type: Literal["aws"] = "aws"
AuthStrategy = Optional[
Union[
EnvAuthStrategy,
MultiEnvAuthStrategy,
AwsAuthStrategy,
]
]
class Field(BaseModel):
key: str
label: str
# "text" accepts any text
format: Literal["json", "jsonpath", "text"]
class TextField(Field):
type: Literal["text"] = "text"
class MultilineTextField(Field):
type: Literal["text-multiline"] = "text-multiline"
Field = Union[TextField, MultilineTextField]
class BaseProvider(BaseModel):
#
# pydantic config
#
class Config:
extra = Extra.allow
#
# class attrs
#
id: ClassVar[str] = ...
"""ID for this provider class."""
name: ClassVar[str] = ...
"""User-facing name of this provider."""
models: ClassVar[List[str]] = ...
"""List of supported models by their IDs. For registry providers, this will
be just ["*"]."""
help: ClassVar[str] = None
"""Text to display in lieu of a model list for a registry provider that does
not provide a list of models."""
model_id_key: ClassVar[str] = ...
"""Kwarg expected by the upstream LangChain provider."""
pypi_package_deps: ClassVar[List[str]] = []
"""List of PyPi package dependencies."""
auth_strategy: ClassVar[AuthStrategy] = None
"""Authentication/authorization strategy. Declares what credentials are
required to use this model provider. Generally should not be `None`."""
registry: ClassVar[bool] = False
"""Whether this provider is a registry provider."""
fields: ClassVar[List[Field]] = []
"""User inputs expected by this provider when initializing it. Each `Field` `f`
should be passed in the constructor as a keyword argument, keyed by `f.key`."""
#
# instance attrs
#
model_id: str
prompt_templates: Dict[str, PromptTemplate]
"""Prompt templates for each output type. Can be overridden with
`update_prompt_template`. The function `prompt_template`, in the base class,
refers to this."""
def __init__(self, *args, **kwargs):
try:
assert kwargs["model_id"]
except:
raise AssertionError(
"model_id was not specified. Please specify it as a keyword argument."
)
model_kwargs = {}
if self.__class__.model_id_key != "model_id":
model_kwargs[self.__class__.model_id_key] = kwargs["model_id"]
model_kwargs["prompt_templates"] = {
"code": PromptTemplate.from_template(
"{prompt}\n\nProduce output as source code only, "
"with no text or explanation before or after it."
),
"html": PromptTemplate.from_template(
"{prompt}\n\nProduce output in HTML format only, "
"with no markup before or afterward."
),
"image": PromptTemplate.from_template(
"{prompt}\n\nProduce output as an image only, "
"with no text before or after it."
),
"markdown": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"md": PromptTemplate.from_template(
"{prompt}\n\nProduce output in markdown format only."
),
"math": PromptTemplate.from_template(
"{prompt}\n\nProduce output in LaTeX format only, "
"with $$ at the beginning and end."
),
"json": PromptTemplate.from_template(
"{prompt}\n\nProduce output in JSON format only, "
"with nothing before or after it."
),
"text": PromptTemplate.from_template("{prompt}"), # No customization
}
super().__init__(*args, **kwargs, **model_kwargs)
async def _call_in_executor(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
"""
Calls self._call() asynchronously in a separate thread for providers
without an async implementation. Requires the event loop to be running.
"""
executor = ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_running_loop()
_call_with_args = functools.partial(self._call, *args, **kwargs)
return await loop.run_in_executor(executor, _call_with_args)
def update_prompt_template(self, format: str, template: str):
"""
Changes the class-level prompt template for a given format.
"""
self.prompt_templates[format] = PromptTemplate.from_template(template)
def get_prompt_template(self, format) -> PromptTemplate:
"""
Produce a prompt template suitable for use with a particular model, to
produce output in a desired format.
"""
if format in self.prompt_templates:
return self.prompt_templates[format]
else:
return self.prompt_templates["text"] # Default to plain format
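# Illustrative usage sketch (assumes a concrete subclass such as ChatOpenAIProvider,
# defined below, plus valid credentials; nothing here is executed by this module):
#
#   provider = ChatOpenAIProvider(model_id="gpt-3.5-turbo")
#   template = provider.get_prompt_template("code")
#   prompt = template.format(prompt="Reverse a string.")
#
# For the "code" format this appends the instruction to return source code only;
# unrecognized formats fall back to the plain "text" template.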
class AI21Provider(BaseProvider, AI21):
id = "ai21"
name = "AI21"
models = [
"j1-large",
"j1-grande",
"j1-jumbo",
"j1-grande-instruct",
"j2-large",
"j2-grande",
"j2-jumbo",
"j2-grande-instruct",
"j2-jumbo-instruct",
]
model_id_key = "model"
pypi_package_deps = ["ai21"]
auth_strategy = EnvAuthStrategy(name="AI21_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class AnthropicProvider(BaseProvider, Anthropic):
id = "anthropic"
name = "Anthropic"
models = [
"claude-v1",
"claude-v1.0",
"claude-v1.2",
"claude-2",
"claude-instant-v1",
"claude-instant-v1.0",
]
model_id_key = "model"
pypi_package_deps = ["anthropic"]
auth_strategy = EnvAuthStrategy(name="ANTHROPIC_API_KEY")
class CohereProvider(BaseProvider, Cohere):
id = "cohere"
name = "Cohere"
models = ["medium", "xlarge"]
model_id_key = "model"
pypi_package_deps = ["cohere"]
auth_strategy = EnvAuthStrategy(name="COHERE_API_KEY")
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
HUGGINGFACE_HUB_VALID_TASKS = (
"text2text-generation",
"text-generation",
"text-to-image",
)
class HfHubProvider(BaseProvider, HuggingFaceHub):
id = "huggingface_hub"
name = "Hugging Face Hub"
models = ["*"]
model_id_key = "repo_id"
help = (
"See https://huggingface.co/models for a list of models. "
"Pass a model's repository ID as the model ID; for example, `huggingface_hub:ExampleOwner/example-model`."
)
# ipywidgets needed to suppress tqdm warning
# https://stackoverflow.com/questions/67998191
# tqdm is a dependency of huggingface_hub
pypi_package_deps = ["huggingface_hub", "ipywidgets"]
auth_strategy = EnvAuthStrategy(name="HUGGINGFACEHUB_API_TOKEN")
registry = True
# Override the parent's validate_environment with a custom list of valid tasks
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in HUGGINGFACE_HUB_VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
# Handle image outputs
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Hugging Face Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string or image generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
response = self.client(inputs=prompt, params=_model_kwargs)
if type(response) is dict and "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
# Custom code for responding to image generation responses
if self.client.task == "text-to-image":
imageFormat = response.format # Presume it's a PIL ImageFile
mimeType = ""
if imageFormat == "JPEG":
mimeType = "image/jpeg"
elif imageFormat == "PNG":
mimeType = "image/png"
elif imageFormat == "GIF":
mimeType = "image/gif"
else:
raise ValueError(f"Unrecognized image format {imageFormat}")
buffer = io.BytesIO()
response.save(buffer, format=imageFormat)
# Encode image data to Base64 bytes, then decode bytes to str
return mimeType + ";base64," + base64.b64encode(buffer.getvalue()).decode()
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {HUGGINGFACE_HUB_VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class OpenAIProvider(BaseProvider, OpenAI):
id = "openai"
name = "OpenAI"
models = [
"text-davinci-003",
"text-davinci-002",
"text-curie-001",
"text-babbage-001",
"text-ada-001",
"davinci",
"curie",
"babbage",
"ada",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
class ChatOpenAIProvider(BaseProvider, OpenAIChat):
id = "openai-chat"
name = "OpenAI"
models = [
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
def append_exchange(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
self.prefix_messages.append({"role": "user", "content": prompt})
self.prefix_messages.append({"role": "assistant", "content": output})
# uses the new OpenAIChat provider. temporarily living as a separate class until
# conflicts can be resolved
class ChatOpenAINewProvider(BaseProvider, ChatOpenAI):
id = "openai-chat-new"
name = "OpenAI"
models = [
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
]
model_id_key = "model_name"
pypi_package_deps = ["openai"]
auth_strategy = EnvAuthStrategy(name="OPENAI_API_KEY")
class JsonContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def __init__(self, request_schema, response_path):
self.request_schema = json.loads(request_schema)
self.response_path = response_path
self.response_parser = parse(response_path)
def replace_values(self, old_val, new_val, d: Dict[str, Any]):
"""Replaces values of a dictionary recursively."""
for key, val in d.items():
if val == old_val:
d[key] = new_val
if isinstance(val, dict):
self.replace_values(old_val, new_val, val)
return d
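# For example (illustrative), with old_val="<prompt>" and new_val="Hi":
#   {"a": "<prompt>", "b": {"c": "<prompt>"}}  ->  {"a": "Hi", "b": {"c": "Hi"}}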
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
request_obj = copy.deepcopy(self.request_schema)
self.replace_values("<prompt>", prompt, request_obj)
request = json.dumps(request_obj).encode("utf-8")
return request
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
matches = self.response_parser.find(response_json)
return matches[0].value
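# Illustrative sketch (hypothetical request schema and response path, for
# demonstration only): "<prompt>" is replaced wherever it appears as a value in
# the schema, and the reply is extracted with a JSONPath expression.
#
#   handler = JsonContentHandler(
#       request_schema='{"inputs": "<prompt>", "parameters": {"max_new_tokens": 256}}',
#       response_path="$.generated_text",
#   )
#   handler.transform_input("Hello!", model_kwargs={})
#   # -> b'{"inputs": "Hello!", "parameters": {"max_new_tokens": 256}}'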
class SmEndpointProvider(BaseProvider, SagemakerEndpoint):
id = "sagemaker-endpoint"
name = "SageMaker endpoint"
models = ["*"]
model_id_key = "endpoint_name"
# This all needs to be on one line of markdown, for use in a table
help = (
"Specify an endpoint name as the model ID. "
"In addition, you must include the `--region_name`, `--request_schema`, and the `--response_path` arguments. "
"For more information, see the documentation about [SageMaker endpoints deployment](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints-deployment.html) "
"and about [using magic commands with SageMaker endpoints](https://jupyter-ai.readthedocs.io/en/latest/users/index.html#using-magic-commands-with-sagemaker-endpoints)."
)
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
registry = True
fields = [
TextField(key="region_name", label="Region name", format="text"),
MultilineTextField(key="request_schema", label="Request schema", format="json"),
TextField(key="response_path", label="Response path", format="jsonpath"),
]
def __init__(self, *args, **kwargs):
request_schema = kwargs.pop("request_schema")
response_path = kwargs.pop("response_path")
content_handler = JsonContentHandler(
request_schema=request_schema, response_path=response_path
)
super().__init__(*args, **kwargs, content_handler=content_handler)
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
class BedrockProvider(BaseProvider, Bedrock):
id = "bedrock"
name = "Amazon Bedrock"
models = [
"amazon.titan-tg1-large",
"anthropic.claude-v1",
"anthropic.claude-instant-v1",
"anthropic.claude-v2",
"ai21.j2-jumbo-instruct",
"ai21.j2-grande-instruct",
]
model_id_key = "model_id"
pypi_package_deps = ["boto3"]
auth_strategy = AwsAuthStrategy()
async def _acall(self, *args, **kwargs) -> Coroutine[Any, Any, str]:
return await self._call_in_executor(*args, **kwargs)
| [
"{prompt}\n\nProduce output in LaTeX format only, with $$ at the beginning and end.",
"{prompt}\n\nProduce output in HTML format only, with no markup before or afterward.",
"{prompt}\n\nProduce output in JSON format only, with nothing before or after it.",
"{prompt}\n\nProduce output as an image only, with no text before or after it.",
"{prompt}\n\nProduce output as source code only, with no text or explanation before or after it.",
"{prompt}",
"{prompt}\n\nProduce output in markdown format only."
] |
2024-01-10 | niceIrene/SMARTFEAT | SMARTFEAT~search.py | import os
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from SMARTFEAT.operator_new import Descrtizer, BinaryOperator, MultiExtractor, AggregateOperator, UnaryOperator, BinaryOperatorAlter
from SMARTFEAT.serialize import *
import copy
import itertools
import pandas as pd
class CurrentAttrLst(object):
# maintain a set of current attributes with a similarity checker
def __init__(self, cur_attr_lst, agenda, data_df, model, budget):
self.cur_attr_lst =copy.deepcopy(cur_attr_lst)
self.cur_attr_str = str(cur_attr_lst)
self.data_agenda = agenda
self.step = 0
self.last_op = None
self.df = data_df
self.model = model
self.previous = 0
self.budget = budget
self.budget_cur = 0
# for column drop
self.unary_attr = []
self.other_attr = []
def __str__(self):
return f"The current attribute list is {self.data_agenda}"
def determine_similarity(self, new_attr):
print("Similarity check")
if self.data_agenda.find(new_attr) != -1:
print("A similar feature already exists")
return True
else:
print("No similar feature found")
return False
def update(self, new_attr, new_desr):
self.cur_attr_lst.append(new_attr)
self.cur_attr_str = str(self.cur_attr_lst)
self.data_agenda = self.data_agenda + ", \n {}: {}".format(new_attr, new_desr)
def text_completion_extract(df, new_feature, temp=0.1):
# use 3.5 turbo here for better efficiency
llm = OpenAI(temperature = temp, model_name='gpt-3.5-turbo')
new_col_val = []
for idx, row in df.iterrows():
attr_lst = list(df.columns)
row_str = row_serialization(row, attr_lst)
if idx == 5:
# interact with the user to see if they want to continue
user_input = input("Five row-completion examples have been displayed, If you want to enable this feature, type 'Continue'; otherwise, press 'Enter': ")
if user_input != 'Continue':
raise ValueError("The user does not want this feature")
try:
response_schema = [
ResponseSchema(name=new_feature, description="string or float, representing attribute value"),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schema)
format_instructions = output_parser.get_format_instructions()
prompt = PromptTemplate(
template=row_str + "{new_feature}:? \n{format_instructions}",
input_variables=["new_feature"],
partial_variables={"format_instructions": format_instructions}
)
chain = LLMChain(llm=llm, prompt=prompt)
result_str = chain.run({"new_feature": new_feature})
print(result_str)
result_dict = output_parser.parse(result_str)
new_value = result_dict[new_feature]
new_col_val.append(new_value)
except Exception as e:
print("Error", str(e))
new_col_val.append(np.nan)
df[new_feature] = new_col_val
def feature_generator_propose(cur_state, org_features, predict_col):
print("The current step is !!!!!")
print(cur_state.step)
# Unary
col = org_features[cur_state.step]
op_unary = UnaryOperator(cur_state.data_agenda, cur_state.model, predict_col, col)
res_lst = op_unary.generate_new_feature(temp= 0.3)
cur_state.last_op = op_unary
return res_lst
# binary enumeration
# elif cur_state.step >= len(org_features):
# if cur_state.last_op is None:
# pairs = list(itertools.combinations(org_features, 2))
# p = pairs[cur_state.step - len(org_features)]
# op_binary = BinaryOperator(cur_state.data_agenda, cur_state.model, predict_col, p)
# res_lst = op_binary.generate_new_feature(temp= 0.1)
# cur_state.last_op = op_binary
# return res_lst
# elif isinstance(cur_state.last_op, BinaryOperator):
# return -1
def feature_genetor_sampling(cur_state, predict_col, tempurature, n_sample):
# for each operator, we stop after five consecutive generation failures / repeated generations.
if cur_state.budget_cur >= cur_state.budget:
print("Budget reached!!!!")
return -1
if cur_state.previous >4:
return -1
if isinstance(cur_state.last_op, UnaryOperator) or (isinstance(cur_state.last_op, BinaryOperatorAlter) and cur_state.previous<=4):
op_binary = BinaryOperatorAlter(cur_state.data_agenda, cur_state.model, predict_col, n_sample)
res_lst = op_binary.generate_new_feature(temp= 0.7)
cur_state.last_op = op_binary
return res_lst
elif isinstance(cur_state.last_op, AggregateOperator) and cur_state.previous<=4:
op_agg = AggregateOperator(cur_state.data_agenda, cur_state.model, predict_col, n_sample)
res_lst = op_agg.generate_new_feature(temp=tempurature)
cur_state.last_op = op_agg
print(res_lst)
return res_lst
elif isinstance(cur_state.last_op, MultiExtractor) and cur_state.previous<=4:
op_multi = MultiExtractor(cur_state.data_agenda, cur_state.model,predict_col)
res_lst = op_multi.generate_new_feature(temp=tempurature)
cur_state.last_op = op_multi
return res_lst
def state_evaluator(result_dict, cur_state, predict_col):
# obtain the function and apply the function
if result_dict is None:
state_update(cur_state, False, "Result Dict is None")
else:
if isinstance(cur_state.last_op, UnaryOperator):
new_col, descr, rel_cols, rel_agenda = cur_state.last_op.parse_output(result_dict)
func_str = cur_state.last_op.find_function(result_dict)
one_hot_flag = 0
if func_str == 'encoding':
one_hot_flag = 1
try:
if one_hot_flag == 0:
exec(func_str)
func_name = obtain_function_name(func_str)
func_obj =locals()[func_name]
try:
# handle one-hot-encoding as specific case:
if 'encoding' in result_dict['description']:
org_cols = list(cur_state.df.columns)
one_hot_flag = 1
onehot_df = pd.get_dummies(cur_state.df, columns=[rel_cols[0]])
cur_state.df = pd.concat([cur_state.df[rel_cols[0]], onehot_df], axis=1)
new_cols = list(cur_state.df.columns)
else:
try:
cur_state.df[new_col] = cur_state.df.apply(lambda row: func_obj(row[rel_cols[0]]), axis=1)
except:
cur_state.df[new_col] = func_obj(cur_state.df[rel_cols[0]])
if one_hot_flag:
if evaluate_for_one_hot(org_cols, new_cols, 10):
state_update(cur_state, True, '', new_col, descr)
# update unary list
cur_state.unary_attr.append(rel_cols[0])
else:
state_update(cur_state,False, "The cardinality for the one-hot encoding is too big")
print("The cardinality for the one-hot encoding is too big!!!!")
cur_state.df = cur_state.df.drop(columns=list(set(new_cols) - set(org_cols)), axis=1)
elif ig_evaluate(cur_state.df, new_col) and not one_hot_flag:
state_update(cur_state, True, '', new_col, descr)
# update unary list
cur_state.unary_attr.append(rel_cols[0])
else:
state_update(cur_state,False, "New feature '{}' is imbalanced or contains many NAN rows.".format(new_col))
cur_state.df = cur_state.df.drop(columns=[new_col])
except Exception as e:
state_update(cur_state,False, "Function '{}' cannot be applied to the dataframe.".format(func_str))
print("Error:", str(e))
except:
state_update(cur_state, False, "New Feature: {}, descr: {}, function: {} cannot be obtained or execute".format(new_col, descr, func_str))
elif isinstance(cur_state.last_op, BinaryOperatorAlter) or isinstance(cur_state.last_op, BinaryOperator):
new_col = result_dict['new_feature']
rel_cols = result_dict['relevant']
descr = result_dict['description']
func_str = cur_state.last_op.find_function(result_dict)
if cur_state.determine_similarity(new_col):
state_update(cur_state,False, "New feature '{}' is similar to existing feature".format(new_col))
else:
try:
exec(func_str)
func_name = obtain_function_name(func_str)
func_obj =locals()[func_name]
try:
cur_state.df[new_col] = cur_state.df.apply(lambda row: func_obj(row[rel_cols[0]], row[rel_cols[1]]), axis = 1)
if ig_evaluate(cur_state.df, new_col):
cur_state.budget_cur += 1
state_update(cur_state, True, '', new_col, descr)
# update the columns that have been used in other operators
cur_state.other_attr.extend(rel_cols)
else:
state_update(cur_state,False, "New feature '{}' is imbalanced or contains many NAN rows.".format(new_col))
cur_state.df = cur_state.df.drop(columns=[new_col])
except:
state_update(cur_state,False, "Function '{}' cannot be applied to the dataframe.".format(func_str))
except:
state_update(cur_state, False, "New Feature: {}, descr: {}, function: {} cannot be obtained or execute".format(new_col, descr, func_str))
elif isinstance(cur_state.last_op, AggregateOperator):
new_col = result_dict["new_feature"]
groupby_col = result_dict['groupby_col']
agg_col = result_dict['agg_col']
function = result_dict['function']
temp_dict = {}
for index, r in enumerate(groupby_col):
if (cur_state.df[r].dtype != 'object') and cur_state.df[r].nunique() > 20:
column_lst = list(cur_state.df.columns)
if "Bucketized_" + r in column_lst:
groupby_col[index] = "Bucketized_" + r
continue
temp_dict['new_feature'] = "Bucketized_{}".format(r)
temp_dict['description'] = "Bucketized {}".format(r)
temp_dict['relevant'] = r
temp_op = Descrtizer(cur_state.data_agenda, cur_state.model,predict_col, r)
func_str = temp_op.find_function(temp_dict)
exec(func_str)
func_name = obtain_function_name(func_str)
func_obj =locals()[func_name]
cur_state.df[temp_dict['new_feature']] = cur_state.df.apply(lambda row: func_obj(row[r]), axis = 1)
groupby_col[index] = temp_dict['new_feature']
else:
continue
new_col = 'GROUPBY_' + str(groupby_col) + '_' + function + '_' + agg_col
new_desr = "df.groupby({})[{}].transform({})".format(groupby_col, agg_col, function)
if cur_state.determine_similarity(new_col):
state_update(cur_state,False, "New feature '{}' is similar to existing feature".format(new_col))
else:
cur_state.df[new_col] = cur_state.df.groupby(groupby_col)[agg_col].transform(function)
if ig_evaluate(cur_state.df, new_col):
cur_state.budget_cur += 1
state_update(cur_state, True, '', new_col, new_col)
# update the columns that have been used in other operators
cur_state.other_attr.extend(groupby_col)
cur_state.other_attr.append(agg_col)
else:
state_update(cur_state,False, "New feature '{}' is imbalanced or contains many NAN rows.".format(new_col))
cur_state.df = cur_state.df.drop(columns=[new_col])
elif isinstance(cur_state.last_op, MultiExtractor):
new_col, descr, rel_cols, rel_agenda = cur_state.last_op.parse_output(result_dict)
try:
find_answer = cur_state.last_op.find_function(result_dict)
except:
state_update(cur_state,False, "find function fail")
return
if find_answer is None:
state_update(cur_state,False, "Invalid feature")
elif 'TEXT' in find_answer:
try:
text_completion_extract(cur_state.df, new_col)
except:
state_update(cur_state,False, "text-completion error")
else:
print("found the lambda function")
print(find_answer)
try:
exec(find_answer)
func_name = obtain_function_name(find_answer)
func_obj = locals()[func_name]
if isinstance(rel_cols, list):
try:
cur_state.df[new_col] = cur_state.df.apply(lambda row: func_obj(*[row[col] for col in rel_cols]), axis = 1)
if ig_evaluate(cur_state.df, new_col):
cur_state.budget_cur += 1
state_update(cur_state, True, '', new_col, descr)
# update the columns that have been used in other operators
cur_state.other_attr.extend(rel_cols)
else:
# if not success, roll back the udpate
state_update(cur_state,False, "New feature '{}' is imbalanced or contains many NAN rows.".format(new_col))
cur_state.df = cur_state.df.drop(columns=[new_col])
except Exception as e:
print(e)
state_update(cur_state,False, "wrong input format")
except Exception as e:
print(e)
state_update(cur_state,False, "Function apply error")
def is_column_imbalanced(df, column_name, threshold=0.8):
column_values = df[column_name]
value_counts = column_values.value_counts()
most_common_value_count = value_counts.max()
total_values = len(column_values)
if most_common_value_count / total_values > threshold:
return True
else:
return False
def ig_evaluate(df, new_col):
column = df[new_col]
nan_percentage = column.isna().mean() * 100
if nan_percentage > 30 or is_column_imbalanced(df, new_col):
return False
else:
return True
def evaluate_for_one_hot(org_cols, new_cols, c = 10):
# remove high cardinality features.
if len(new_cols) - len(org_cols) >= 10:
return False
else:
return True
def state_update(cur_state, result_f_or_t, false_msg, new_col = '', new_desrc = ''):
print("The current budget is: ", cur_state.budget_cur)
if result_f_or_t:
print("update state")
cur_state.previous = 0
# update the budget
cur_state.update(new_col, new_desrc)
print(cur_state.df.head(10))
# cache the intermediate result
cur_state.df.to_csv("current_df_{}_{}.csv".format(cur_state.step, new_col))
else:
print(false_msg)
if isinstance(cur_state.last_op, MultiExtractor) or isinstance(cur_state.last_op, BinaryOperatorAlter) or isinstance(cur_state.last_op, AggregateOperator):
cur_state.previous += 1
| [
"new_feature",
"PLACEHOLDER{new_feature}:? \n{format_instructions}",
"{new_feature}:? \n{format_instructions}",
"format_instructions"
] |
2024-01-10 | niceIrene/SMARTFEAT | run~prediction_new.py | import sys
print(sys.executable)
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
sys.path.append("./")
sys.path.append('../prompts/')
import pathlib as Path
from SMARTFEAT.serialize import *
import os
import argparse
import openai
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.agents import load_tools
from langchain.memory import ConversationBufferMemory
from SMARTFEAT.search import CurrentAttrLst
import pandas as pd
import numpy as np
from SMARTFEAT.Prediction_helper import *
from sklearn.model_selection import train_test_split
from SMARTFEAT.search import *
import copy
from SMARTFEAT.feature_evaluation import *
import time
def parse_args():
args = argparse.ArgumentParser()
args.add_argument('--path', type= str, default='./dataset/pima_diabetes/')
args.add_argument('--predict_col', type=str, default='Outcome')
args.add_argument('--csv', type=str, default='diabetes.csv')
# args.add_argument('--model', type=str, choices=['gpt-4', 'gpt-3.5-turbo', 'text-davinci-003'], default='gpt-3.5-turbo')
args.add_argument('--temperature', type=float, default=0.7)
args.add_argument('--n_generate_sample', type=int, default=1)
args.add_argument('--clf_model', type=str, default='Decision Tree')
args.add_argument('--delimiter', type=int, default=1)
args.add_argument('--sampling_budget', type=int, default=10)
args = args.parse_args()
return args
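# Hedged usage note (added for illustration; not part of the original script):
# an example command line using the default pima_diabetes settings above; the
# exact paths are assumptions and may need adjusting.
#
#   python run/prediction_new.py --path ./dataset/pima_diabetes/ --csv diabetes.csv \
#       --predict_col Outcome --clf_model "Decision Tree" --sampling_budget 10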
args = parse_args()
if args.delimiter ==1:
data_df = pd.read_csv(args.path + args.csv, delimiter=',')
else:
data_df = pd.read_csv(args.path + args.csv, delimiter=';')
attributes = list(data_df.columns)
attributes.remove(args.predict_col)
org_features = attributes
with open(args.path + 'data_agenda.txt', 'r') as f:
    data_agenda = f.read()
# drop the index column if have
for c in list(org_features):
if 'Unnamed' in c:
org_features.remove(c)
print("The original features are")
print(org_features)
# initialize the root state
cur_attr_lst = CurrentAttrLst(org_features, data_agenda, data_df, args.clf_model, args.sampling_budget)
while True:
try:
if cur_attr_lst.step < len(org_features):
result_lst = -1
result_lst = feature_generator_propose(cur_attr_lst, org_features, args.predict_col)
cur_attr_lst.step += 1
if result_lst is None:
continue
for r in result_lst:
print("Start value evaluation")
print(r)
state_evaluator(r, cur_attr_lst, args.predict_col)
else:
result_lst = feature_genetor_sampling(cur_attr_lst, args.predict_col, args.temperature, args.n_generate_sample)
if result_lst == -1 or cur_attr_lst.budget_cur > cur_attr_lst.budget:
cur_attr_lst.budget_cur = 0
# more than five continuous failures or reach budget
if isinstance(cur_attr_lst.last_op, MultiExtractor):
print("Search process ends")
break
elif isinstance(cur_attr_lst.last_op, BinaryOperatorAlter):
                    # the binary operator has reached its generation-error limit; move on to the aggregator
print("Binary ends, go to aggregator")
cur_attr_lst.previous = 0
cur_attr_lst.last_op = AggregateOperator(cur_attr_lst.data_agenda, cur_attr_lst.model, args.predict_col, args.n_generate_sample)
continue
elif isinstance(cur_attr_lst.last_op, AggregateOperator):
                    # the aggregate operator has reached its generation-error limit; move on to the extractor
print("Aggregate ends, go to extract")
cur_attr_lst.previous = 0
cur_attr_lst.last_op = MultiExtractor(cur_attr_lst.data_agenda, cur_attr_lst.model, args.predict_col)
continue
elif result_lst is None or len(result_lst) == 0:
print("result lst is empty")
cur_attr_lst.previous += 1
continue
else:
for r in result_lst:
if cur_attr_lst.budget_cur <= cur_attr_lst.budget and cur_attr_lst.previous < 5:
state_evaluator(r, cur_attr_lst, args.predict_col)
else:
print("Budget or error times reached!!!!")
break
except Exception as e:
print("Exception occurs!!!")
print(e)
wait_time = 2 # Delay in seconds
time.sleep(wait_time)
continue
# lastly drop columns
cols_to_drop = list(set(cur_attr_lst.unary_attr) - set(cur_attr_lst.other_attr))
print("Columns to drop is")
print(cols_to_drop)
cur_attr_lst.df = cur_attr_lst.df.drop(columns = cols_to_drop, axis=1)
print(cur_attr_lst.df.head())
cur_attr_lst.df.to_csv("current_df_final.csv") | [] |
2024-01-10 | niceIrene/SMARTFEAT | SMARTFEAT~serialize.py | import os
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
import re
import numpy as np
import pandas as pd
def data_preproessing(data_df, y_label):
    # first remove the index columns
for c in list(data_df.columns):
if 'Unnamed' in c:
data_df = data_df.drop(columns=[c])
# drop NAN and inf
data_df = data_df.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
# obtain the set of features
attributes = list(data_df.columns)
attributes.remove(y_label)
features = attributes
# factorize these columns
for c in data_df.columns:
if type(data_df[c][0]) != np.int64 and type(data_df[c][0]) != np.float64:
print(type(data_df[c][0]))
data_df[c] = data_df[c].astype(object)
data_df[c], _ = pd.factorize(data_df[c])
return data_df, features
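# Hedged usage sketch (added for illustration; not part of the original module):
# typical call pattern before fitting a model; the file and column names below
# are hypothetical.
#
#   df = pd.read_csv("diabetes.csv")
#   df, feature_cols = data_preproessing(df, y_label="Outcome")
#   X, y = df[feature_cols], df["Outcome"]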
def obtain_rel_agenda(rel_cols, agenda):
rel_agenda = []
for col in rel_cols:
start = agenda.find(col)
end = agenda.find('\n', start)
if end == -1:
end = None
rel_agenda.append(agenda[start:end])
return ', '.join(rel_agenda)
def obtain_function_name(func_str):
match = re.search(r"def (\w+)\(", func_str)
return match.group(1)
def obtain_relevant_cols(output_dic):
rel_cols = output_dic["relevant"].split('\n')
rel_cols_new = []
for c in rel_cols:
c = c.strip()
rel_cols_new.append(c)
return rel_cols_new
def row_serialization(row, attr_lst):
row_ser = ''
for a in attr_lst:
row_ser = row_ser + str(a) + ":" + str(row[a])+ ","
return row_ser
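# Hedged usage sketch (added for illustration; not part of the original module):
# row_serialization() flattens one row into "col:value," pairs for prompting.
if __name__ == "__main__":  # illustration only; never runs on import
    _demo = pd.DataFrame({"Age": [34], "BMI": [27.1]})
    print(row_serialization(_demo.iloc[0], ["Age", "BMI"]))  # Age:34,BMI:27.1,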
def obtain_function_new_features(func_str):
pos = func_str.find('return')
return_str = func_str[pos+7:]
attributes = return_str.split(',')
return attributes
def text_completion_extract(df, new_feature, temp=0.1):
# use 3.5 turbo here for better efficiency
llm = OpenAI(temperature = temp, model_name='gpt-3.5-turbo')
new_col_val = []
for idx, row in df.iterrows():
attr_lst = list(df.columns)
row_str = row_serialization(row, attr_lst)
if idx == 5:
# interact with the user to see if they want to continue
user_input = input("Five row-completion examples have been displayed, If you want to enable this feature, type 'Continue'; otherwise, press 'Enter': ")
if user_input != 'Continue':
raise ValueError("The user does not want this feature")
try:
response_schema = [
ResponseSchema(name=new_feature, description="string or float, representing attribute value"),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schema)
format_instructions = output_parser.get_format_instructions()
prompt = PromptTemplate(
template=row_str + "{new_feature}:? \n{format_instructions}",
input_variables=["new_feature"],
partial_variables={"format_instructions": format_instructions}
)
chain = LLMChain(llm=llm, prompt=prompt)
result_str = chain.run({"new_feature": new_feature})
print(result_str)
result_dict = output_parser.parse(result_str)
new_value = result_dict[new_feature]
new_col_val.append(new_value)
except Exception as e:
print("Error", str(e))
new_col_val.append(np.nan)
df[new_feature] = new_col_val | [
"new_feature",
"PLACEHOLDER{new_feature}:? \n{format_instructions}",
"{new_feature}:? \n{format_instructions}",
"format_instructions"
] |
2024-01-10 | niceIrene/SMARTFEAT | baselines~CAAFE.py | # %%
from caafe import CAAFEClassifier # Automated Feature Engineering for tabular datasets
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
import os
import openai
from caafe import data
from tabpfn.scripts import tabular_metrics
from sklearn.metrics import accuracy_score
import sys
sys.path.append('../')
from SMARTFEAT.Prediction_helper import *
import time
import pandas as pd
from SMARTFEAT.serialize import *
import numpy as np
from sklearn.model_selection import train_test_split
# %%
openai.api_key = "YOUR_OPENAI_APIKEY"
metric_used = tabular_metrics.auc_metric
# %% load input dataset
data_df = pd.read_csv("../dataset/[DatasetPath]/[DatasetWithNewFeatures].csv")
y_label = 'Y_Label'
# %% data preprocessing
data_df, features = data_preproessing(data_df, y_label)
X = data_df[features]
y = data_df[y_label]
# %% split dataset
X_train, X_test, y_train, y_test =train_test_split(data_df[features],data_df[y_label],
test_size=0.25,
random_state=0,
stratify=data_df[y_label])
df_train = pd.concat([X_train,y_train], axis=1)
df_test = pd.concat([X_test,y_test], axis=1)
#%% before feature engineering
models = GetBasedModel()
names,results, tests = PredictionML(X_train, y_train,X_test, y_test,models)
basedLineScore = ScoreDataFrame(names,results, tests)
basedLineScore
#%% Setup and Run CAAFE for the five classifiers
clf_no_feat_eng = LogisticRegression()
# clf_no_feat_eng = GaussianNB()
# clf_no_feat_eng = RandomForestClassifier()
# clf_no_feat_eng = ExtraTreesClassifier()
# clf_no_feat_eng = MLPClassifier(hidden_layer_sizes=(100, 100), activation='relu', solver='adam', learning_rate='constant', learning_rate_init=0.0001, alpha=0.001, max_iter=1000)
start_time = time.time()
with open('[PathtoFolder]/dataset/[DatasetPath]/data_agenda.txt', 'r') as f:
    data_agenda = f.read()
caafe_clf = CAAFEClassifier(base_classifier=clf_no_feat_eng,
llm_model="gpt-4",
iterations=10)
caafe_clf.fit_pandas(df_train,
target_column_name=y_label,
dataset_description=data_agenda)
pred = caafe_clf.predict(df_test)
end_time = time.time()
print("The total timeoverhead is")
print(end_time-start_time)
# %% print the code
print(caafe_clf.code)
# %%
X_train, X_test, y_train, y_test =train_test_split(data_df[features],data_df[y_label],
test_size=0.25,
random_state=0,
stratify=data_df[y_label])
# apply the code to both the X_train and X_test
'''
Python code given in caafe_clf.code
'''
# %% AUC score after data remedy
from sklearn.metrics import roc_auc_score
clf_no_feat_eng.fit(X_train, y_train)
test_score = roc_auc_score(y_test, clf_no_feat_eng.predict_proba(X_test)[:, 1])
print(test_score)
# %% evaluate feature usefulness.
import sys
sys.path.append('../')
import pathlib as Path
from SMARTFEAT.serialize import *
from SMARTFEAT.feature_evaluation import feature_evaluation_show_all, feature_evaluation_select_k
import pandas as pd
print("===========================================")
print('mutual info')
feature_evaluation_show_all(X_train, y_train, 'mutual info')
print("===========================================")
print('rfe-rf')
feature_evaluation_show_all(X_train, y_train, 'rfe-rf')
print("===========================================")
print('feature_importance')
feature_evaluation_show_all(X_train, y_train, 'feature_importance')
print("===========================================")
# %%
| [] |
2024-01-10 | niceIrene/SMARTFEAT | SMARTFEAT~operator_new.py | import os
import sys
sys.path.append('../')
sys.path.append("./")
sys.path.append('../prompts/')
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from SMARTFEAT.gpt import *
from prompt import *
import json
import re
from SMARTFEAT.serialize import *
import pandas as pd
class Operator(object):
def __init__(self, agenda,model_prompt, y_attr):
self.data_agenda = agenda
self.y_attr = y_attr
self.model_prompt = model_prompt
class UnaryOperator(Operator):
# template of unary operator
def __init__(self, agenda, model_prompt, y_attr, org_attr):
super().__init__(agenda, model_prompt, y_attr)
self.org_attr = org_attr
def __str__(self):
return f"Unary operator, original feature '{self.org_attr}' to predict {self.y_attr}."
def generate_new_feature(self, temp):
rel_agenda = obtain_rel_agenda([self.org_attr], self.data_agenda)
y_agenda = obtain_rel_agenda([self.y_attr], self.data_agenda)
data_prompt = rel_agenda + ', ' + y_agenda + '\n'
op_prompt = unary_prompt_propose.format(y_attr= self.y_attr, input = self.org_attr)
prompt = "Dataset feature description: " + data_prompt + "Downstream machine learning models: " + self.model_prompt +'\n'+ op_prompt
# for propose, generate one candidate. in the answer, it gives a list of proposals.
res = gpt_propose(prompt = prompt, n = 1, temperature=temp)
res_lst = []
for r in res:
r_lst = r.split()
if any(substr in r for substr in ['(certain)', '(high)', '(Certain)', '(High)']):
result_dict = {}
result_dict['new_feature'] = "{}_{}".format(r_lst[1], self.org_attr)
result_dict['description'] = r
result_dict['relevant'] = self.org_attr
res_lst.append(result_dict)
if len(res_lst)==0:
res_lst = None
print("The set of unary operators to be applied is")
print(res_lst)
return res_lst
def find_function(self, result_dict, temp=0.1):
# handle one-hot-encoding as specific case:
if 'encoding' in result_dict['description'] or 'Encoding' in result_dict['description']:
func_str = 'encoding'
return func_str
        new_col, descr, rel_cols, rel_agenda = self.parse_output(result_dict)
llm = OpenAI(temperature=temp)
prompt = PromptTemplate(
input_variables=["new_feature", "relevant", "description", "rel_agenda"],
template="You are a data scientist specializing in feature engineering. Your task is to create a transformation function based on the provided input, output, new feature description, while considering input data types and ranges.\
Generate the most appropriate python function to obtain new feature(s) {new_feature} (output) using feature {relevant} (input), new feature description: {description}, input description: {rel_agenda}. Define the function using the 'def' keyword.")
chain = LLMChain(llm=llm, prompt=prompt)
func_str = chain.run({"new_feature": new_col, "relevant": rel_cols, "description": descr, "rel_agenda": rel_agenda})
print(func_str)
start = func_str.find('def')
func_str = func_str[start:]
return func_str
def parse_output(self, result_dict):
new_col = result_dict['new_feature']
descr = result_dict['description']
rel_cols = obtain_relevant_cols(result_dict)
rel_agenda = obtain_rel_agenda(rel_cols,self.data_agenda)
return new_col, descr, rel_cols, rel_agenda
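# Hedged usage sketch (added for illustration; not part of the original module):
# a UnaryOperator is typically driven in two steps -- propose candidates, then
# synthesize a concrete Python function for one of them. The attribute names
# below are hypothetical and the calls require a valid OpenAI key, so this is
# left as comments.
#
#   op = UnaryOperator(data_agenda, "Decision Tree", y_attr="Outcome", org_attr="Glucose")
#   candidates = op.generate_new_feature(temp=0.7)    # list of dicts or None
#   if candidates:
#       func_str = op.find_function(candidates[0])    # 'def ...' source, or 'encoding'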
# used only in Aggregator
class Descrtizer(UnaryOperator):
def __str__(self):
return f"Bucketize '{self.org_attr}' to predict '{self.y_attr}"
def find_function(self, result_dict, temp=0.1):
        new_col, descr, rel_cols, rel_agenda = self.parse_output(result_dict)
data_prompt = rel_agenda
llm = OpenAI(temperature=temp)
prompt = PromptTemplate(
input_variables=["data_prompt", "new_feature", "relevant", "description"],
template="You are a data scientist specializing in feature engineering, where you excel in finding the most suitable operation to obtain new features based on attribute context.\
Attribute description: {data_prompt}, generate the most appropriate python function \
to obtain new feature {new_feature} (output) using feature {relevant} (input), function description: {description}. Do not provide a lambda function."
)
chain = LLMChain(llm=llm, prompt=prompt)
func_str = chain.run({"data_prompt": data_prompt, "new_feature": new_col, "relevant": rel_cols, "description": descr})
start = func_str.find('def')
func_str = func_str[start:]
return func_str
# binary operator using propose strategy
class BinaryOperator(Operator):
def __init__(self, agenda, model_prompt, y_attr, org_attrs):
super().__init__(agenda, model_prompt, y_attr)
self.org_attrs = org_attrs
def __str__(self):
return f"binary operator, using {str(self.org_attrs)}."
def parse_output(self, result_dict):
new_col = result_dict['new_feature']
descr = result_dict['description']
rel_cols = obtain_relevant_cols(result_dict)
rel_agenda = obtain_rel_agenda(rel_cols,self.data_agenda)
return new_col, descr, rel_cols, rel_agenda
def generate_new_feature(self, temp):
rel_agenda = obtain_rel_agenda(self.org_attrs, self.data_agenda)
y_agenda = obtain_rel_agenda([self.y_attr], self.data_agenda)
data_prompt = rel_agenda + ',' + y_agenda + '\n'
op_prompt = binary_prompt_propose.format(y_attr= self.y_attr, input = self.org_attrs)
prompt = "Attribute description: " + data_prompt + "Downstream machine learning models:" + self.model_prompt +'\n'+ op_prompt
res = gpt_fix_or_propose_binary(prompt = prompt, n = 1, temperature=temp)
res_lst = []
for r in res:
if 'certain' in r:
result_dict = {}
result_dict['new_feature'] = "Binary_{}".format(str(self.org_attrs))
result_dict['description'] = "Binary operator {}".format(res[0])
relevant_str = str(self.org_attrs[0]) +',' + str(self.org_attrs[1])
result_dict['relevant'] = relevant_str
res_lst.append(result_dict)
if len(res_lst) == 0:
res_lst = None
return res_lst
def find_function(self, result_dict, temp=0.1):
        new_col, descr, rel_cols, rel_agenda = self.parse_output(result_dict)
data_prompt = rel_agenda
llm = OpenAI(temperature=temp)
prompt = PromptTemplate(
input_variables=["data_prompt", "new_feature", "relevant", "description"],
template="You are a data scientist specializing in feature engineering, where you excel in finding the most suitable operation to obtain new features based on attribute context.\
Attribute description: {data_prompt}, generate the most appropriate python function with +/-/*//to obtain new feature {new_feature} (output) using features {relevant} (input), function description: {description}. If the selected attribute is /, Handle the case of devide by zero."
)
chain = LLMChain(llm=llm, prompt=prompt)
func_str = chain.run({"data_prompt": data_prompt, "new_feature": new_col, "relevant": rel_cols, "description": descr})
start = func_str.find('def')
func_str = func_str[start:]
return func_str
# binary operator using sampling strategy
class BinaryOperatorAlter(Operator):
def __init__(self, agenda, model_prompt, y_attr, num_samples):
super().__init__(agenda, model_prompt, y_attr)
self.num_samples = num_samples
def __str__(self):
return f"binary operator."
def parse_output(self, result_dict):
new_col = result_dict['new_feature']
descr = result_dict['description']
rel_cols = result_dict['relevant']
rel_agenda = obtain_rel_agenda(rel_cols,self.data_agenda)
return new_col, descr, rel_cols, rel_agenda
def generate_new_feature(self, temp):
op_prompt = binary_prompt_sampling.format(y_attr= self.y_attr)
prompt = "Dataset feature description: " + self.data_agenda + "Downstream machine learning models:" + self.model_prompt +'\n'+ op_prompt
answer = gpt_sampling(prompt = prompt, n = self.num_samples, temperature=temp)
res_lst = []
for i in range(len(answer)):
res_dic = re.search(r'\{[^}]+\}', answer[i])
answer_dict = eval(res_dic.group(0))
res_lst.append(answer_dict)
print("Result list is")
print(res_lst)
return res_lst
def find_function(self, result_dict, temp=0.1):
new_col, descr, rel_cols, rel_agenda = self.parse_output(result_dict)
llm = OpenAI(temperature=temp)
prompt = PromptTemplate(
input_variables=["new_feature", "relevant", "description", "rel_agenda"],
template="You are a data scientist specializing in feature engineering. Your task is to create a transformation function based on the provided input, output, new feature description, while considering input data types and ranges.\
Generate the most appropriate python function with +/-/*//to obtain new feature {new_feature} (output) using features {relevant} (input), new feature description: {description}, input description: {rel_agenda}. \
Define the function using the 'def' keyword. If the selected attribute is /, Handle the case of devide by zero."
)
chain = LLMChain(llm=llm, prompt=prompt)
func_str = chain.run({"new_feature": new_col, "relevant": rel_cols, "description": descr, "rel_agenda": rel_agenda})
start = func_str.find('def')
func_str = func_str[start:]
print(func_str)
return func_str
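# Hedged usage sketch (added for illustration; not part of the original module):
# the sampling-based binary operator first proposes {new_feature, description,
# relevant} dicts and then asks the LLM for a Python implementation of each.
# Requires a valid OpenAI key, so it is left as comments; names are hypothetical.
#
#   op = BinaryOperatorAlter(data_agenda, "Decision Tree", y_attr="Outcome", num_samples=3)
#   for cand in op.generate_new_feature(temp=0.7):
#       func_str = op.find_function(cand)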
class AggregateOperator(Operator):
def __init__(self, agenda, model_prompt, y_attr, num_samples):
super().__init__(agenda, model_prompt, y_attr)
self.num_samples = num_samples
def __str__(self):
return f"groupby-aggregation operator."
def generate_new_feature(self, temp):
op_prompt = aggregator_prompt_sampling.format(y_attr= self.y_attr)
prompt = "Dataset feature description: " + self.data_agenda + "Downstream machine learning models:" + self.model_prompt +'\n'+ op_prompt
answer = gpt_sampling(prompt = prompt, n = self.num_samples, temperature=temp)
res_lst = []
for i in range(len(answer)):
try:
res_dic = re.search(r'{(.*?)}', answer[i])
answer_dict = eval(res_dic.group(0))
answer_dict['new_feature'] = 'GROUPBY_' + str(answer_dict['groupby_col']) + '_' + str(answer_dict['function']) + '_' + str(answer_dict['agg_col'])
print(answer_dict)
res_lst.append(answer_dict)
except:
print("result cannot parse")
res_lst.append(None)
return res_lst
class MultiExtractor(Operator):
def __init__(self, agenda, model_prompt, y_attr):
super().__init__(agenda, model_prompt, y_attr)
def __str__(self):
return f"Multiextract operator."
def generate_new_feature(self, temp):
data_prompt = self.data_agenda
op_prompt = extractor_prompt_sampling.format(y_attr= self.y_attr)
prompt = "Dataset feature description: " + data_prompt + "Downstream machine learning models:" + self.model_prompt +'\n'+ op_prompt
answer = gpt_sampling_extract(prompt = prompt, n = 1, temperature=temp)
try:
res_lst = []
answer_dict = eval(answer[0])
print(answer_dict)
res_lst.append(answer_dict)
except:
print("result cannot parse")
res_lst.append(None)
return res_lst
def find_function(self, result_dict, temp=0.3):
new_col, descr, rel_cols, rel_agenda = self.parse_output(result_dict)
prompt = extractor_function_prompt.format(new_feature= new_col, relevant= rel_cols, description= descr, rel_agenda= rel_agenda)
# Try to generate the function
answer = gpt_sampling_extract(prompt = prompt, n = 1, temperature=temp)[0]
print("=========================")
print(answer)
print("=========================")
if 'NEED' in answer:
print("Need to use text completion.")
return 'TEXT'
elif 'Cannot' in answer:
print("Cannot find a function or use text completion.")
return None
else:
if 'EXTERNAL' in answer:
print("External sources needed")
print(answer)
return None
else:
pattern = r"```python(.*?)```"
match = re.search(pattern, answer, re.DOTALL)
code = match.group(1).strip()
return code
def parse_output(self, result_dict):
new_col = result_dict['new_feature']
descr = result_dict['description']
rel_cols = result_dict['relevant']
rel_agenda = obtain_rel_agenda(rel_cols, self.data_agenda)
return new_col, descr, rel_cols, rel_agenda | [
"You are a data scientist specializing in feature engineering. Your task is to create a transformation function based on the provided input, output, new feature description, while considering input data types and ranges. Generate the most appropriate python function with +/-/*//to obtain new feature {new_feature} (output) using features {relevant} (input), new feature description: {description}, input description: {rel_agenda}. Define the function using the 'def' keyword. If the selected attribute is /, Handle the case of devide by zero.",
"\n",
"You are a data scientist specializing in feature engineering, where you excel in finding the most suitable operation to obtain new features based on attribute context. Attribute description: {data_prompt}, generate the most appropriate python function to obtain new feature {new_feature} (output) using feature {relevant} (input), function description: {description}. Do not provide a lambda function.",
"Downstream machine learning models:",
"You are a data scientist specializing in feature engineering, where you excel in finding the most suitable operation to obtain new features based on attribute context. Attribute description: {data_prompt}, generate the most appropriate python function with +/-/*//to obtain new feature {new_feature} (output) using features {relevant} (input), function description: {description}. If the selected attribute is /, Handle the case of devide by zero.",
"PLACEHOLDER,PLACEHOLDER\n",
"Downstream machine learning models: ",
"data_prompt",
"new_feature",
"relevant",
"rel_agenda",
"Dataset feature description: ",
"description",
"You are a data scientist specializing in feature engineering. Your task is to create a transformation function based on the provided input, output, new feature description, while considering input data types and ranges. Generate the most appropriate python function to obtain new feature(s) {new_feature} (output) using feature {relevant} (input), new feature description: {description}, input description: {rel_agenda}. Define the function using the 'def' keyword.",
"Attribute description: ",
"PLACEHOLDER, PLACEHOLDER\n"
] |