Spaces:
Runtime error
Runtime error
File size: 5,739 Bytes
2fdfcc2 96bb0fe 2fdfcc2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 |
import streamlit as st
import os
import random
import time
from module.__custom__ import *
from streamlit_extras.switch_page_button import switch_page
# Openai API Key
import openai
import json
def read_api_key_from_secrets(file_path='secrets.json'):
    """Load the OpenAI API key from a local JSON secrets file.

    Args:
        file_path: Path of the JSON file holding the key (default 'secrets.json').

    Returns:
        The string stored under the 'openai_api_key' key.

    Raises:
        FileNotFoundError: if the secrets file does not exist.
        ValueError: if the file is not valid JSON.
        KeyError: if the 'openai_api_key' entry is missing.
    """
    try:
        # Explicit encoding so the key parses the same on every platform.
        with open(file_path, 'r', encoding='utf-8') as secrets_file:
            secrets_data = json.load(secrets_file)
        openai_api_key = secrets_data.get('openai_api_key')
        if openai_api_key is None:
            raise KeyError("'openai_api_key' not found in secrets.json")
        return openai_api_key
    except FileNotFoundError:
        raise FileNotFoundError(f"The file {file_path} was not found.")
    except json.JSONDecodeError:
        raise ValueError(f"Error decoding JSON in {file_path}. Please check the file format.")
# Load the API key once at startup.  The original had the
# `read_api_key_from_secrets()` call commented out, so `key` was an
# undefined name and the assignment raised a NameError that the except
# clause below does not catch — crashing the app on launch.
try:
    key = read_api_key_from_secrets()
    openai.api_key = key
    os.environ['OPENAI_API_KEY'] = key  # langchain reads the key from the environment
    print("OpenAI API Key Found")
except (FileNotFoundError, ValueError, KeyError) as e:
    # Report but keep running; downstream OpenAI calls will fail loudly instead.
    print(f"Error: {e}")
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
embedding = OpenAIEmbeddings()
# from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
# embedding = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
# LLM
from langchain.chat_models import ChatOpenAI
llm_name = "gpt-3.5-turbo"
llm = ChatOpenAI(model_name=llm_name, temperature=0)
# Load the two persisted Chroma vector stores from disk.
# NOTE(review): "cos" vs "plot" presumably distinguish the default collection
# from one whose documents carry plot-page data — confirm against the
# indexing script that built ./data/docs.
db_cos = Chroma(
    persist_directory="./data/docs/chroma_cos",
    embedding_function=embedding
)
db_plot = Chroma(
    persist_directory="./data/docs/chroma_plot",
    embedding_function=embedding
)
# Metadata the self-query retriever may filter on: only the game's name.
metadata_field_info = [
    AttributeInfo(
        name="name",
        description="The name of the video game on steam",
        type="string",
    )
]
document_content_description = "Brief summary of a video game on Steam"
# Sidebar toggle picks which vector store backs the retriever this rerun.
with st.sidebar: is_plot = st.toggle('Enable Plot')
db_selected = db_cos
if is_plot: db_selected = db_plot
# Self-query retriever: the LLM turns free-text prompts into structured
# queries (with optional result limit) against the selected store.
retriever = SelfQueryRetriever.from_llm(
    llm,
    db_selected,
    document_content_description,
    metadata_field_info,
    enable_limit=True,
)
# App header.
emoji = '🕹️ GameInsightify'
st.header(emoji)
# Initialize chat history (messages) and per-query game-name lists in session state.
if "messages" not in st.session_state:
    st.session_state.messages = []
if 'gamenames' not in st.session_state:
    st.session_state.gamenames = []
# Title on the left, clear-history button on the right.
col1, col2= st.columns([8,2])
with col1:
    st.title("Game Recommender")
with col2:
    if st.button("Clear chat"):
        st.session_state.messages = []
        st.session_state.gamenames = []
# Display chat messages from history on app rerun.
tab1, tab2= st.tabs(['Chatbot', ' '])
with tab1: # this tab exists because the chatbot's height is limited via the stTabs CSS at the bottom of the file
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
with tab2: pass # placeholder tab; present only so the tab container (and its height CSS) applies
# Accept user input and stream a recommendation back.
if prompt := st.chat_input("Need a game recommendation?"):
    st.session_state.messages.append({"role": "user", "content": prompt}) # Add user message to chat history
    with st.chat_message("user"): # Display user message in chat message container
        st.markdown(prompt)
    with st.chat_message("assistant"): # Display assistant response in chat message container
        message_placeholder = st.empty()
        # Previous direct-db approach, kept for reference:
        # docs = db.max_marginal_relevance_search(prompt,k=query_num, fetch_k=10) # Sending query to db
        docs = retriever.invoke(prompt) # retrieve matching game documents via the self-query retriever
        full_response = random.choice( # randomly picked opening sentence of the response
            ["I recommend the following games:\n",
            f"Hi, human! These are the {len(docs)} best games:\n",
            f"I bet you will love these {len(docs)} games:\n",]
        )
        # Format the retrieved documents as a numbered list of game names.
        top_games = []
        assistant_response = ""
        for idx, doc in enumerate(docs):
            gamename = doc.metadata['name']
            top_games.append(gamename)
            assistant_response += f"{idx+1}. {gamename}\n"
        # Split the list into word-sized chunks, preserving line breaks.
        chunks = []
        for line in assistant_response.splitlines():
            for word in line.split() : chunks.append(word)
            chunks.append('\n')
        chunks = chunks[0:-1]  # drop the trailing newline chunk
        # Simulate a streaming response: reveal one word every 50 ms.
        for chunk in chunks:
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌") # Add a blinking cursor to simulate typing
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": full_response})
    # Remember the recommended game names so the plot page can chart them.
    if is_plot: st.session_state.gamenames.append(top_games)
# Centered navigation button to the plot page, shown only in plot mode.
col1, col2, col3= st.columns([4,2,4])
with col2:
    # NOTE(review): db_selected==db_plot is always true whenever is_plot is
    # true (see the selection above), so the second condition looks redundant — confirm.
    if is_plot and db_selected==db_plot:
        if st.button("Plot Games"): # button in center column
            switch_page('Overall')
# Styling on Tabs: constrain the tab container to a fixed height with its own
# vertical scrollbar, which is why the chatbot lives inside a tab at all.
# Fixes two defects in the original: the css string embedded a stray
# `</style>` even though the markdown call below already wraps the rules in
# <style>…</style> (emitting a duplicated closing tag), and it was an
# f-string with escaped {{ }} despite having no placeholders.
css = '''
div.stTabs {
    height: 40vh;
    overflow-y: scroll;
    overflow-x: hidden;
}
'''
st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)