import numpy as np
import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
load_dotenv()
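# The local .env file is expected to define HUGGINGFACEHUB_API_TOKEN,
# a valid Hugging Face API token (read below via os.environ.get).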
# Initialize the client against the Hugging Face Inference API
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),
)
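# Note: this Inference API endpoint is OpenAI-compatible, which is why the
# stock OpenAI client works here with only a custom base_url.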
# Map display names to Hugging Face repo IDs for the supported models
model_links = {
    "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct"
}
# Info about each model, displayed in the sidebar
model_info = {
    "Meta-Llama-3-8B": {
        'description': """The Llama 3 model is a **Large Language Model (LLM)** built for question-and-answer interactions.\n
\nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.**\n""",
        'logo': 'Llama_logo.png'
    }
}
# Random dog images for error message
random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg", "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg"]
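# Each filename is appended to https://random.dog/ to build the fallback
# image URL shown when the API call fails (see the except block below).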
def reset_conversation():
    '''Resets the conversation history.'''
    st.session_state.conversation = []
    st.session_state.messages = []
# Define the available models
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Custom description for SciMom
st.sidebar.write("Built for my mom, with love. This model is trained on NCERT science textbooks.")
st.sidebar.write("Model used: Meta Llama; trained using Docker AutoTrain.")
# Create a temperature slider
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
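# Lower temperatures give more deterministic replies; higher values give
# more varied ones.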
# Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# Create model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]
st.subheader(f'AI - {selected_model}')
# Set a default model
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        try:
            # Stream the completion back token by token
            stream = client.chat.completions.create(
                model=model_links[selected_model],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            response = st.write_stream(stream)
        except Exception as e:
            # Fall back to a friendly error message and a random dog picture
            response = "😵‍💫 Looks like something went wrong! Here's a random pic of a 🐶:"
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)
    st.session_state.messages.append({"role": "assistant", "content": response})
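
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py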