Koomemartin commited on
Commit
1ad30a4
·
verified ·
1 Parent(s): 731addc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -54
app.py CHANGED
@@ -1,64 +1,55 @@
1
- import subprocess
2
-
3
- # List of packages to install
4
- packages = ['langchain', 'langchain-community', 'langchainhub','langchain-chroma','langchain-groq','langchain-huggingface','gradio']
5
-
6
- # Install packages
7
- for package in packages:
8
- subprocess.check_call(['pip', 'install', package])
9
 
10
 #import dependencies
11
- from langchain.memory import ConversationBufferMemory
12
- from langchain_community.chat_message_histories import StreamlitChatMessageHistory
13
- from langchain_groq import ChatGroq
14
- from langchain.chains import LLMChain
15
 
16
- groq_api_key=''
 
17
 
18
- llm = None
19
 
20
- def load_model():
21
- global llm
22
- if llm is None:
23
- llm=ChatGroq(model="gemma2-9b-it",api_key=groq_api_key)
24
- return llm
25
 
 
26
 
27
- llm = load_model()
28
 
29
- from langchain_core.prompts import PromptTemplate
30
 
31
- template = ("""You are a professional Maths tutor answer questions provided by user in step by step manner.
32
- Use the provided context to answer the question.
33
- try to engange with the user and follow up on questions asked
34
- If you don't know the answer, say so. Explain your answer in detail.
35
- Do not discuss the context in your response; just provide the answer directly.
36
 
37
- Question: {question}
38
-
39
- Answer:""")
40
-
41
- rag_prompt = PromptTemplate.from_template(template)
42
-
43
-
44
-
45
- history = StreamlitChatMessageHistory(key="chat_messages")
46
-
47
- #Step 3 - here we create a memory object
48
-
49
- memory = ConversationBufferMemory(chat_memory=history)
50
-
51
- llm_chain = LLMChain(llm=llm, prompt=rag_prompt, memory=memory)
52
-
53
- import streamlit as st
54
-
55
- st.title('🦜🔗 Welcome to the MathLearn ')
56
- for msg in history.messages:
57
- st.chat_message(msg.type).write(msg.content)
58
-
59
- if x := st.chat_input():
60
- st.chat_message("human").write(x)
61
-
62
- # As usual, new messages are added to StreamlitChatMessageHistory when the Chain is called.
63
- response = llm_chain.invoke(x)
64
- st.chat_message("ai").write(response["text"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
 #import dependencies
 
 
 
 
3
 
4
import os

from groq import Groq
import streamlit as st
6
 
 
7
 
8
# Read the Groq API key from the environment (never hard-code secrets).
# NOTE(review): if GROQ_API_KEY is unset this is None and the client will
# fail on first request — consider failing fast with a clear message.
groq_api_key = os.getenv("GROQ_API_KEY")

# Initialize the Groq client once at module load; reused by get_response().
client = Groq(api_key=groq_api_key)
13
 
14
def get_response(query):
    """Send a math question to the Groq chat API and return the answer text.

    Parameters
    ----------
    query : str
        The user's math question.

    Returns
    -------
    str
        The model's step-by-step answer.
    """
    # FIX: "compeletions" -> "completions" (the SDK attribute is
    # client.chat.completions; the misspelling raised AttributeError).
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a math assistant. Your role is to solve math problems with a detailed, step-by-step solution. Be clear and concise in each step. If there are multiple approaches, select the most efficient method. Include any formulas or key concepts used, and provide the final answer at the end."},
            {"role": "user", "content": query},
        ],
        model='gemma2-9b-it',
        temperature=0.4,
        stream=False,     # single complete response, not a chunk iterator
        max_tokens=1024,
        stop=None,        # FIX: "stope" is not a valid parameter name (TypeError)
    )
    # FIX: with stream=False the text lives at .message.content;
    # .delta.content only exists on streaming chunks.
    return response.choices[0].message.content
30
+
31
+
32
# --- Streamlit chat UI -----------------------------------------------------
st.title('📚🔗 Welcome to MathLearn♾ ')

# Lazily create the message list; st.session_state survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored conversation so history is visible after each rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Accept user input, echo it, then render the assistant's reply.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            answer = get_response(prompt)
            st.write(answer)

    # Persist the assistant's turn so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": answer})