Runtime error
Update app.py
app.py
CHANGED
@@ -1,64 +1,55 @@
-import subprocess
-
-# List of packages to install
-packages = ['langchain', 'langchain-community', 'langchainhub','langchain-chroma','langchain-groq','langchain-huggingface','gradio']
-
-# Install packages
-for package in packages:
-    subprocess.check_call(['pip', 'install', package])
 
 # import dependencies
-from langchain.memory import ConversationBufferMemory
-from langchain_community.chat_message_histories import StreamlitChatMessageHistory
-from langchain_groq import ChatGroq
-from langchain.chains import LLMChain
-
-llm = None
-
-    global llm
-    if llm is None:
-        llm=ChatGroq(model="gemma2-9b-it",api_key=groq_api_key)
-    return llm
-
-Use the provided context to answer the question.
-try to engange with the user and follow up on questions asked
-If you don't know the answer, say so. Explain your answer in detail.
-Do not discuss the context in your response; just provide the answer directly.
-
-st.
+import os
+from groq import Groq
+import streamlit as st
+
+groq_api_key = os.getenv("GROQ_API_KEY")
+
+# initialize the Groq client
+client = Groq(api_key=groq_api_key)
+
+def get_response(query):
+    response = client.chat.completions.create(
+        messages=[
+            {"role": "system", "content": "You are a math assistant. Your role is to solve math problems with a detailed, step-by-step solution. Be clear and concise in each step. If there are multiple approaches, select the most efficient method. Include any formulas or key concepts used, and provide the final answer at the end."},
+            {"role": "user", "content": query}
+        ],
+        model='gemma2-9b-it',
+        temperature=0.4,
+        stream=False,
+        max_tokens=1024,
+        stop=None
+    )
+
+    # With stream=False the full reply is in message.content (delta only exists on streaming chunks)
+    return response.choices[0].message.content
+
+
+st.title('📚🔗 Welcome to MathLearn♾')
+
+# Streamlit session state to manage chat messages
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Display chat history
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+# Accept user input and process response
+if user_input := st.chat_input():
+    st.session_state.messages.append({"role": "user", "content": user_input})
+    with st.chat_message("user"):
+        st.markdown(user_input)
+
+    with st.chat_message("assistant"):
+        with st.spinner("Thinking..."):
+            response_text = get_response(user_input)
+            st.write(response_text)
+
+    # Save assistant's response to chat history
+    st.session_state.messages.append({"role": "assistant", "content": response_text})
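
Since this commit drops the runtime pip-install loop, the Space presumably declares its dependencies in a requirements.txt instead. A minimal sketch covering only what the new app.py imports (package names are an assumption, not part of the commit):

groq
streamlit

The updated code requests stream=False and reads the whole reply from message.content. If token-by-token output in the chat window is wanted instead, a hedged sketch of a streaming variant is below; get_response_stream is a hypothetical helper, not part of the commit, it reuses the client defined in app.py, and it assumes the Groq SDK's streaming chunks expose choices[0].delta.content and that the installed Streamlit version provides st.write_stream.

def get_response_stream(query):
    # Same request as get_response, but with stream=True so tokens arrive incrementally (assumed variant)
    stream = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a math assistant."},
            {"role": "user", "content": query}
        ],
        model='gemma2-9b-it',
        temperature=0.4,
        stream=True,
        max_tokens=1024,
    )
    for chunk in stream:
        # Streaming chunks carry incremental deltas; content can be None on the final chunk
        if chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content

# Inside the assistant block, st.write_stream would render tokens as they arrive
# and return the concatenated text for the chat history:
#     with st.chat_message("assistant"):
#         response_text = st.write_stream(get_response_stream(user_input))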