Commit a74068a
Parent(s): 4122eb1
Delete app.py
app.py
DELETED
@@ -1,142 +0,0 @@
import os
import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
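# Usage note (an assumption about deployment, not part of the original file):
# the HuggingFace token is read from the HF_KEY environment variable below,
# so a local run would look like:
#   HF_KEY=hf_... streamlit run app.py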

class UserInterface():

    def __init__(self):
        st.warning("Warning: Some models may not work and some models may require GPU to run")
        st.text("An Open Source Chat Application")
        st.header("Open LLMs")

        # self.API_KEY = st.sidebar.text_input(
        #     'API Key',
        #     type='password',
        #     help="Type in your HuggingFace API key to use this app"
        # )

        # Only one model is offered for now. Store it on self.models, which is
        # read in form_data() and passed to HuggingFaceHub as repo_id; the
        # original code never assigned this attribute.
        models_name = ("HuggingFaceH4/zephyr-7b-beta",)
        self.models = models_name[0]
        self.temperature = st.sidebar.slider(
            label='Temperature',
            min_value=0.1,
            max_value=1.0,
            step=0.1,
            value=0.5,
            help="Set the temperature to get accurate or random results"
        )

        self.max_token_length = st.sidebar.slider(
            label="Token Length",
            min_value=32,
            max_value=2048,
            step=16,
            value=64,
            help="Set max tokens to control the maximum amount of generated text"
        )

        # Sampling settings forwarded to the model on every call.
        self.model_kwargs = {
            "temperature": self.temperature,
            "max_new_tokens": self.max_token_length
        }

        os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv("HF_KEY")
    def form_data(self):

        try:
            # if not self.API_KEY.startswith('hf_'):
            #     st.warning('Please enter your API key!', icon='⚠')
            #     text_input_visibility = True
            # else:
            #     text_input_visibility = False
            text_input_visibility = False

            if "messages" not in st.session_state:
                st.session_state.messages = []

            st.write(f"You are using the {self.models} model")

            # Replay the conversation stored in the session state.
            for message in st.session_state.messages:
                with st.chat_message(message.get('role')):
                    st.write(message.get("content"))

            context = st.sidebar.text_input(
                label="Context",
                help="Context tells the model what the answer should be based on"
            )

            question = st.chat_input(
                key="question",
                disabled=text_input_visibility
            )
template = f"<|system|>\nYou are a intelligent chatbot and expertise in {context}.</s>\n<|user|>\n{question}.\n<|assistant|>"
|
83 |
-
|
84 |
-
# template = """
|
85 |
-
# Answer the question based on the context, if you don't know then output "Out of Context"
|
86 |
-
# Context: {context}
|
87 |
-
# Question: {question}
|
88 |
-
|
89 |
-
# Answer:
|
90 |
-
# """
|
91 |
-
prompt = PromptTemplate(
|
92 |
-
template=template,
|
93 |
-
input_variables=[
|
94 |
-
'question',
|
95 |
-
'context'
|
96 |
-
]
|
97 |
-
)
|
98 |
-
            llm = HuggingFaceHub(
                repo_id=self.models,
                model_kwargs=self.model_kwargs
            )

            if question:
                llm_chain = LLMChain(
                    prompt=prompt,
                    llm=llm,
                )

                result = llm_chain.run({
                    "question": question,
                    "context": context
                })

                if "Out of Context" in result:
                    result = "Out of Context"
                st.session_state.messages.append(
                    {
                        "role": "user",
                        "content": f"Context: {context}\n\nQuestion: {question}"
                    }
                )
                with st.chat_message("user"):
                    st.write(f"Context: {context}\n\nQuestion: {question}")

                # Typing "clear" wipes the chat history instead of querying the model.
                if question.lower() == "clear":
                    del st.session_state.messages
                    return

                st.session_state.messages.append(
                    {
                        "role": "assistant",
                        "content": result
                    }
                )
                with st.chat_message('assistant'):
                    st.markdown(result)

        except Exception as e:
            st.error(e, icon="🚨")


model = UserInterface()
model.form_data()
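For reference, the exact string this chain sends to the model can be previewed without running Streamlit. A minimal sketch using the same template; the context and question values below are made-up examples:

import os

# Avoid the HuggingFaceHub import here; only the prompt is exercised.
from langchain.prompts import PromptTemplate

template = (
    "<|system|>\nYou are an intelligent chatbot with expertise in {context}.</s>\n"
    "<|user|>\n{question}\n<|assistant|>"
)
prompt = PromptTemplate(template=template, input_variables=["question", "context"])

# Renders the placeholders exactly as LLMChain would before calling the model.
print(prompt.format(context="astronomy", question="What is a pulsar?"))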
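The langchain.llms.HuggingFaceHub wrapper and LLMChain used above were later deprecated in LangChain. A rough modern equivalent, offered as a hedged sketch rather than a tested drop-in replacement, would use HuggingFaceEndpoint from langchain_community together with an LCEL pipeline:

from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate

# Assumes HUGGINGFACEHUB_API_TOKEN (or an explicit huggingfacehub_api_token
# argument) is set, mirroring the env variable exported in __init__ above.
llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    temperature=0.5,
    max_new_tokens=64,
)

prompt = PromptTemplate.from_template(
    "<|system|>\nYou are an intelligent chatbot with expertise in {context}.</s>\n"
    "<|user|>\n{question}\n<|assistant|>"
)

chain = prompt | llm  # LCEL replaces LLMChain(prompt=..., llm=...)
print(chain.invoke({"context": "astronomy", "question": "What is a pulsar?"}))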