# Scrape artifact — Hugging Face Spaces status banner captured with the page
# ("Spaces: Sleeping / Sleeping"); commented out so the file parses as Python.
"""Streamlit chat UI for "Chat with Pinocchio".

Forwards each user prompt to a local LLM backend (``POST /llm_query/`` on
port 8000), unwraps the JSON-in-JSON payload it returns, and keeps the
conversation in ``st.session_state.messages`` so it survives reruns.
"""
import streamlit as st
from utils import Recorder, record_audio, play_mp3
import os
import requests
import ast
import json
from openai import OpenAI
import random
from dotenv import load_dotenv
import time

# Pull OPENAI_API_KEY from the local .env file.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

client = OpenAI(api_key=api_key)

st.title("Chat with Pinocchio")

# --- Session-state bootstrap (runs on every rerun; guards keep it idempotent) ---
if "count1" not in st.session_state:
    st.session_state.count1 = 0
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation on each rerun. NOTE: message["content"] is
# fed to st.markdown, so everything appended below must be a string.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

n = len(st.session_state.messages)

if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        # Forward the prompt to the local LLM backend as a query parameter.
        url = "http://localhost:8000/llm_query/"
        # timeout + raise_for_status: don't hang the UI forever or try to
        # parse an error page as JSON when the backend is down.
        llm_response = requests.post(
            url=url, params={"llm_query": str(prompt)}, timeout=60
        )
        llm_response.raise_for_status()
        data = llm_response.json()
        # The backend double-encodes: "response_text" holds a JSON string.
        inner_data = json.loads(data["response_text"])
        risposta = inner_data.get("risposta")    # Italian: "answer"
        stato = inner_data.get("stato")          # "state"  — currently unused
        tipologia = inner_data.get("tipologia")  # "type"   — currently unused
        st.write(inner_data)
        # Store a STRING, not the raw dict: the replay loop above passes
        # content to st.markdown. Fall back to the serialized payload when
        # the backend sent no "risposta" key.
        st.session_state.messages.append(
            {
                "role": "assistant",
                "content": risposta if risposta is not None else json.dumps(inner_data),
            }
        )
    # st.session_state.count1 += 1