Bitbucket Admin commited on
Commit
0a6168f
·
1 Parent(s): 9b12694

Initialize streamlit app

Browse files
Files changed (4) hide show
  1. .gitignore +1 -0
  2. Llama_logo.png +0 -0
  3. app.py +201 -0
  4. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
Llama_logo.png ADDED
app.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Simplebot
2
+ @author: Vishnudhat Natarajan
3
+ @email: [email protected]
4
+
5
+ """
6
+ import numpy as np
7
+ import streamlit as st
8
+ from openai import OpenAI
9
+ import os
10
+ import sys
11
+ from dotenv import load_dotenv, dotenv_values
12
+ load_dotenv()
13
+
14
+
15
+
16
+
17
+
18
# Hugging Face's serverless Inference API speaks the OpenAI wire protocol,
# so the stock OpenAI client works when pointed at the HF endpoint.
# The token is read from the environment (populated by load_dotenv above).
client = OpenAI(base_url="https://api-inference.huggingface.co/v1",
                api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'))
23
+
24
+
25
+
26
+
27
# Maps the display name shown in the sidebar to its Hugging Face repo id.
model_links = {
    "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
    "Gemma-7B": "google/gemma-1.1-7b-it",
    "Gemma-2B": "google/gemma-1.1-2b-it",
    "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
}
36
+
37
# Sidebar blurb + logo for each model. Keys should mirror model_links; the
# plain "Zephyr-7B" entry has no model_links counterpart, so it is never
# selectable today — kept in case that model is re-enabled.
# Fixes applied to the displayed text: "team as has over" -> "team and has
# over", and the doubled "trained on on" -> "trained on".
model_info = {
    "Mistral-7B": {
        'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
    "Gemma-7B": {
        'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team and has over **7 billion parameters.** \n""",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    "Gemma-2B": {
        'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team and has over **2 billion parameters.** \n""",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
    "Zephyr-7B": {
        'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nFrom Huggingface: \n\
Zephyr is a series of language models that are trained to act as helpful assistants. \
[Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)\
is the third model in the series, and is a fine-tuned version of google/gemma-7b \
that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
    "Zephyr-7B-β": {
        'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nFrom Huggingface: \n\
Zephyr is a series of language models that are trained to act as helpful assistants. \
[Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)\
is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
        'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
    "Meta-Llama-3-8B": {
        'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
\nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
        'logo': 'Llama_logo.png'},
}
72
+
73
+
74
# Image filenames on https://random.dog/ — one is shown at random to soften
# the error message when the inference API call fails.
random_dog = [
    "0f476473-2d8b-415e-b944-483768418a95.jpg",
    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg",
]
88
+
89
+
90
+
91
def reset_conversation():
    """Clear the stored conversation and chat transcript in session state.

    Wired to the sidebar "Reset Chat" button via on_click; Streamlit
    ignores the (implicit None) return value of button callbacks.
    """
    st.session_state.messages = []
    st.session_state.conversation = []
98
+
99
+
100
+
101
+
102
# ---------------------------------------------------------------------------
# Sidebar controls
# ---------------------------------------------------------------------------

# The available models are the display names registered in model_links.
models = list(model_links)

# Model picker, sampling-temperature slider, and reset button.
selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Describe the chosen model in the sidebar.
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")

# Wipe the transcript when the user switches models so a new model does not
# inherit another model's conversation history.
if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.prev_option = selected_model
    reset_conversation()  # clears st.session_state.messages too

# Hugging Face repo id backing the selected display name.
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')

# Remember the repo id for the selected model across reruns.
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]

# Initialize chat history on first run.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored transcript on every rerun (Streamlit re-executes the
# whole script on each interaction).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input and stream back the model's answer.
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):

    # Echo the user's message and record it in the transcript.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stream the assistant's reply; on any API failure, degrade to a
    # friendly message plus a random dog picture instead of a traceback.
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
            response = st.write_stream(stream)
        except Exception as e:
            # Broad catch is deliberate: the HF endpoint raises a variety of
            # client/connection errors and all should hit this fallback.
            response = ("😵‍💫 Looks like someone unplugged something!"
                        "\n Either the model space is being updated or something is down."
                        "\n\n Try again later. "
                        "\n\n Here's a random pic of a 🐶:")
            st.write(response)
            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)

    # Persist the assistant turn (streamed text or the error fallback).
    st.session_state.messages.append({"role": "assistant", "content": response})
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ openai
+ langchain
+ python-dotenv
+ langchain-community
+ streamlit
+ numpy