# NOTE: removed scraped Hugging Face Spaces page chrome ("Spaces: / Running") —
# it was UI residue from the web page this file was copied from, not code.
import random  # NOTE(review): unused in this file — kept in case another chunk relies on it
import time

import pandas as pd
import streamlit as st
from openai import OpenAI

# NOTE(review): the module-alias import below is unused (the class is imported
# directly on the next line) — kept to avoid changing the file's import surface.
import elemeta.nlp.runners.metafeature_extractors_runner as metafeature_extractors_runner
from elemeta.nlp.runners.metafeature_extractors_runner import MetafeatureExtractorsRunner
from elemeta.nlp.extractors.high_level.detect_language_langdetect import DetectLanguage
from elemeta.nlp.extractors.high_level.sentiment_polarity import SentimentPolarity
from elemeta.nlp.extractors.high_level.text_complexity import TextComplexity
from elemeta.nlp.extractors.high_level.text_length import TextLength
from elemeta.nlp.extractors.high_level.toxicity_extractor import ToxicityExtractor
from elemeta.nlp.extractors.high_level.word_count import WordCount

# One shared runner: every chat message is passed through this fixed set of
# Elemeta metafeature extractors to produce a dict of per-message metrics.
runner = MetafeatureExtractorsRunner(
    metafeature_extractors=[
        TextLength(),
        WordCount(),
        DetectLanguage(),
        SentimentPolarity(),
        TextComplexity(),
        ToxicityExtractor(),
    ]
)
def ask_gpt(messages, model="gpt-3.5-turbo"):
    """Send a chat history to the OpenAI Chat Completions API and return the reply.

    Args:
        messages: Chat history as a list of ``{"role": ..., "content": ...}``
            dicts in the OpenAI chat format.
        model: Model name to query (defaults to ``"gpt-3.5-turbo"``).

    Returns:
        str: The text content of the first completion choice.
    """
    # `client` is the module-level OpenAI client created after this definition;
    # the call is non-streaming (the "typing" effect is simulated by the caller).
    ret = client.chat.completions.create(model=model, messages=messages)
    return ret.choices[0].message.content
# Create the OpenAI client once at module load (reads OPENAI_API_KEY from the
# environment by default).
client = OpenAI()

st.title("Elemeta Chat")
st.header("Chat")

# Initialize chat history once per session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Streamlit re-executes the whole script on every interaction, so replay the
# stored history to keep earlier messages on screen.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input (walrus: prompt is None until the user submits something).
if prompt := st.chat_input("Enter prompt to send to assistant"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display the assistant response with a simulated typing effect.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        assistant_response = ask_gpt(messages=st.session_state.messages)
        # Reveal the reply word by word with a short delay to mimic streaming.
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing.
            message_placeholder.markdown(full_response + "β")
        message_placeholder.markdown(full_response)
    # Persist the assistant reply. NOTE(review): split()/re-join normalizes the
    # reply's whitespace and leaves a trailing space — kept as-is to preserve
    # the existing stored-history behavior.
    st.session_state.messages.append({"role": "assistant", "content": full_response})
# Split the chat history by speaker so each side gets its own metafeature table.
user_messages = [m["content"] for m in st.session_state.messages if m["role"] == "user"]
assistant_messages = [m["content"] for m in st.session_state.messages if m["role"] == "assistant"]

# Run the Elemeta extractors on each message; runner.run(...) yields one
# metafeature dict per message, which becomes one DataFrame row.
user_df = pd.DataFrame([runner.run(text) for text in user_messages])
user_df["prompt"] = user_messages
# Prefix columns so user/assistant metrics stay distinguishable after concat.
user_df.columns = "user_" + user_df.columns.values

assistant_df = pd.DataFrame([runner.run(text) for text in assistant_messages])
assistant_df["prompt"] = assistant_messages
assistant_df.columns = "assistant_" + assistant_df.columns.values

st.subheader("Chat Metafeatures")
# Side-by-side concat aligns user/assistant turns by row index; if one side has
# fewer messages, pandas fills the shorter side with NaN.
st.dataframe(pd.concat([user_df, assistant_df], axis=1))