Update app.py
app.py CHANGED
@@ -7,35 +7,85 @@ from dotenv import load_dotenv
 # Load environment variables
 load_dotenv()
 
-# Initialize the
+# Initialize the Hugging Face client
+hf_api_key = os.getenv('HF_API_KEY') # Replace with your Hugging Face API key
+openai_api_key = os.getenv('OPENAI_API_KEY') # Replace with your OpenAI API key
+
 client = OpenAI(
-
-    api_key=os.environ.get('API_KEY') # Replace with your token
+    api_key=openai_api_key
 )
 
-#
+# Create supported models
 model_links = {
     "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
     "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-
+    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+    "Mistral-Nemo-Instruct-2407": "mistralai/Mistral-Nemo-Instruct-2407",
+    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
+    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
+    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
+    "Aya-23-35B": "CohereForAI/aya-23-35B",
+    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
+    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
+    "Gemma-2-27b-it": "google/gemma-2-27b-it",
+    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
+    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
+    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
+    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
+    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
+    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+    "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
+    "Starchat2-15b-v0.1": "HuggingFaceH4/starchat2-15b-v0.1",
+    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
+    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
+    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
+    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
+    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
+    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
 }
 
-#
+# Random dog images for error message
+random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
+              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
+              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
+              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
+              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
+              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
+              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
+              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
+              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
+              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
+
+# Reset conversation
 def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []
 
-#
+# Define the available models
 models = [key for key in model_links.keys()]
+
+# Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)
+
+# Create a temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
-st.sidebar.button('Reset Chat', on_click=reset_conversation)
 
+# Add reset button to clear conversation
+st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button
+
+# Create model description
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
 
-#
+# Initialize previous option and messages
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
@@ -44,12 +94,15 @@ if st.session_state.prev_option != selected_model:
     st.session_state.prev_option = selected_model
     reset_conversation()
 
-#
+# Pull in the model we want to use
 repo_id = model_links[selected_model]
 
-# Main chat interface
 st.subheader(f'TypeGPT.net - {selected_model}')
 
+# Set a default model
+if selected_model not in st.session_state:
+    st.session_state[selected_model] = model_links[selected_model]
+
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -61,10 +114,13 @@ for message in st.session_state.messages:
 
 # Accept user input
 if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+    # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
+    # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
+    # Display assistant response in chat message container
     with st.chat_message("assistant"):
         try:
             stream = client.chat.completions.create(
@@ -78,15 +134,21 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                 max_tokens=3000,
             )
             response = st.write_stream(stream)
+
         except Exception as e:
-            response = "😵‍💫 Looks like
+            response = ("😵‍💫 Looks like someone unplugged something! "
+                        "Either the model space is being updated or something is down. "
+                        "Try again later. Here's a random pic of a 🐶:")
             st.write(response)
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
+            st.image(random_dog_pick)
+            st.write("This was the error message:")
+            st.write(e)
 
     st.session_state.messages.append({"role": "assistant", "content": response})
 
 
 
-
 # import gradio as gr
 # from huggingface_hub import InferenceClient
 
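Note on the key handling above: the commit switches from os.environ.get('API_KEY') to os.getenv(...), and both return None rather than raising when the variable is unset, so a missing key only surfaces once the first request fails. A minimal sketch of the loading step, assuming a local .env file next to app.py (on a deployed Space the same names would be set as repository secrets instead); the guard at the end is an addition, not part of the commit:

# Sketch only -- the variable names come from the diff, the guard is an addition.
import os
from dotenv import load_dotenv

load_dotenv()                                  # populate os.environ from .env
hf_api_key = os.getenv('HF_API_KEY')           # None if unset, no exception
openai_api_key = os.getenv('OPENAI_API_KEY')

if not openai_api_key:                         # fail fast instead of at request time
    raise RuntimeError("OPENAI_API_KEY is not set")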
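The body of the client.chat.completions.create(...) call (new lines 127-133) falls outside every hunk, so the diff never shows it. From the pieces that are visible -- the selected repo id, the temp_values slider, a stream handed to st.write_stream, and max_tokens=3000 -- it plausibly looks like the sketch below; the argument values are inferred, not copied from the file:

# Hypothetical reconstruction of the elided call.
stream = client.chat.completions.create(
    model=model_links[selected_model],    # repo id chosen in the sidebar
    messages=[{"role": m["role"], "content": m["content"]}
              for m in st.session_state.messages],
    temperature=temp_values,              # sidebar slider, 0.0-1.0
    stream=True,                          # st.write_stream consumes a stream
    max_tokens=3000,
)
response = st.write_stream(stream)        # renders tokens as they arrive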
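One caveat in the new error path: random_dog[np.random.randint(len(random_dog))] assumes numpy is imported as np in the part of the file the diff does not show; if it is not, the fallback itself raises a NameError from inside the except block. The standard library gives the same pick with no extra dependency (an illustrative alternative, not what the commit uses):

import random

# Equivalent pick without numpy.
random_dog_pick = 'https://random.dog/' + random.choice(random_dog)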