import numpy as np
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()

# API keys, read from the environment
hf_api_key = os.getenv('HF_API_KEY')
openai_api_key = os.getenv('OPENAI_API_KEY')

# The model IDs below are Hugging Face repos, so the OpenAI SDK is pointed at
# Hugging Face's OpenAI-compatible Inference API endpoint. (Assumption: the
# original client had no base_url and would otherwise send these model names
# to api.openai.com, which does not know them.)
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=hf_api_key,
)
# Supported models: display name -> Hugging Face repo ID
model_links = {
    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
    "Aya-23-35B": "CohereForAI/aya-23-35B",
    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
    "Gemma-2-27b-it": "google/gemma-2-27b-it",
    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
}
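# To expose another model, add a display-name -> repo-ID pair above, e.g.
# (hypothetical entry): model_links["My-Model"] = "my-org/my-model"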
# Random dog images for the error message
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg",
]
# Reset conversation
def reset_conversation():
    st.session_state.conversation = []
    st.session_state.messages = []
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
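# temp_values is forwarded below as the `temperature` sampling parameter:
# lower values give more deterministic replies, higher values more varied ones.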
# Reset button: on_click clears the history before Streamlit reruns the script
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# Create model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
# Track the previously selected model so a switch can be detected
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

# Clear the conversation when the user switches models
# (reset_conversation() already empties st.session_state.messages)
if st.session_state.prev_option != selected_model:
    st.session_state.prev_option = selected_model
    reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]
st.subheader(f'TypeGPT.net - {selected_model}')
# Cache the selected model's repo ID in session state
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            # Stream tokens into the UI and capture the concatenated text
            response = st.write_stream(stream)
        except Exception as e:
            response = ("😵‍💫 Looks like someone unplugged something! "
                        "Either the model space is being updated or something is down. "
                        "Try again later. Here's a random pic of a 🐶:")
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)

    st.session_state.messages.append({"role": "assistant", "content": response})
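# The streamed reply (or the error apology) is appended to
# st.session_state.messages, so each new request resends the whole
# conversation; max_tokens above caps only the new reply, not the context.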
# import gradio as gr
# from huggingface_hub import InferenceClient
#
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
#
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     messages.append({"role": "user", "content": message})
#
#     response = ""
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content
#         response += token
#         yield response
#
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
#
# if __name__ == "__main__":
#     demo.launch()
#####################################
# import gradio as gr
# gr.load("models/meta-llama/Meta-Llama-3.1-70B-Instruct").launch()
########################################
# import streamlit as st
# from transformers import AutoTokenizer, AutoModelForCausalLM
#
# # Load model directly
# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
# model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
#
# # Initialize chat history
# if "chat_history" not in st.session_state:
#     st.session_state.chat_history = []
#
# # Display chat history
# for chat in st.session_state.chat_history:
#     st.write(f"User: {chat['user']}")
#     st.write(f"Response: {chat['response']}")
#
# # Get user input
# user_input = st.text_input("Enter your message:")
#
# # Generate response
# if st.button("Send"):
#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(**inputs)
#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
#     st.session_state.chat_history.append({"user": user_input, "response": response})
#     st.write(f"Response: {response}")