import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize an OpenAI-compatible client pointed at the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('API_KEY'),  # Hugging Face access token loaded from .env
)
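# A minimal .env next to this script, assuming API_KEY holds a Hugging Face
# access token (the hf_... value below is a placeholder):
#
#   API_KEY=hf_xxxxxxxxxxxxxxxxxxxxxxxx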
# Map display names to Hugging Face model repository ids
model_links = {
    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    # Add more models as needed
}
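# Note: the meta-llama repositories are gated on the Hugging Face Hub, so the
# token in API_KEY must belong to an account that has accepted the Llama 3.1
# license before these models will respond.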
# Function to reset the conversation and message history
def reset_conversation():
    st.session_state.conversation = []
    st.session_state.messages = []
# Sidebar setup
models = list(model_links.keys())
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
# Manage session state: reset the chat when the user switches models
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.prev_option = selected_model
    reset_conversation()
# Model repository id for the selected model
repo_id = model_links[selected_model]
# Main chat interface
st.subheader(f'TypeGPT.net - {selected_model}')

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            # st.write_stream renders the tokens as they arrive and returns the full text
            response = st.write_stream(stream)
        except Exception as e:
            response = "😵‍💫 Looks like something went wrong! Please try again later."
            st.write(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
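# To try this locally (assuming the file is saved as app.py):
#   pip install streamlit openai python-dotenv
#   streamlit run app.py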
# import gradio as gr
# from huggingface_hub import InferenceClient

# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content or ""  # delta.content can be None on the final chunk
#         response += token
#         yield response
# """ | |
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
# """ | |
# demo = gr.ChatInterface( | |
# respond, | |
# additional_inputs=[ | |
# gr.Textbox(value="You are a friendly Chatbot.", label="System message"), | |
# gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"), | |
# gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"), | |
# gr.Slider( | |
# minimum=0.1, | |
# maximum=1.0, | |
# value=0.95, | |
# step=0.05, | |
# label="Top-p (nucleus sampling)", | |
# ), | |
# ], | |
# ) | |
# if __name__ == "__main__": | |
# demo.launch() | |
#####################################
# import gradio as gr
# gr.load("models/meta-llama/Meta-Llama-3.1-70B-Instruct").launch()
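# The "models/" prefix makes gr.load build a ready-made demo backed by the
# hosted Inference API, so no weights are downloaded locally.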
########################################
# import streamlit as st
# from transformers import AutoTokenizer, AutoModelForCausalLM

# # Load model directly
# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
# model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")

# # Initialize chat history
# if "chat_history" not in st.session_state:
#     st.session_state.chat_history = []

# # Display chat history
# for chat in st.session_state.chat_history:
#     st.write(f"User: {chat['user']}")
#     st.write(f"Response: {chat['response']}")

# # Get user input
# user_input = st.text_input("Enter your message:")

# # Generate response
# if st.button("Send"):
#     inputs = tokenizer(user_input, return_tensors="pt")
#     outputs = model.generate(**inputs)
#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
#     st.session_state.chat_history.append({"user": user_input, "response": response})
#     st.write(f"Response: {response}")