mmaleki92 committed
Commit c0dd994 · verified · 1 Parent(s): 6740bd4

Update app.py

Files changed (1)
  1. app.py +41 -31
app.py CHANGED
@@ -1,7 +1,7 @@
-import gradio as gr
+import streamlit as st
 from llama_cpp import Llama
-import json
 import os
+import json
 import time
 
 # Function to convert message history to prompt
@@ -9,8 +9,8 @@ def prompt_from_messages(messages):
     prompt = ''
     for message in messages:
         prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n"
-        prompt += f"{message['content']}<|eot_id|>{{}}" # Corrected here
-    prompt = prompt[:-10] # Adjust the slicing accordingly
+        prompt += f"{message['content']}<|eot_id|>"
+    prompt = prompt[:-10]
     return prompt
 
 # Initialize the Llama model
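
A quick note on the hunk above, not part of the commit: the old f-string's doubled braces rendered a literal {} after each <|eot_id|>, so the [:-10] slice no longer lined up with the marker it was meant to strip. After the fix, each turn ends with the 10-character <|eot_id|> token of the Llama 3-style template, and the slice removes exactly the final one, leaving the prompt open for the model's reply. A minimal sketch of the corrected function's behavior:

def prompt_from_messages(messages):
    prompt = ''
    for message in messages:
        prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n"
        prompt += f"{message['content']}<|eot_id|>"
    # len("<|eot_id|>") == 10, so this strips only the trailing end-of-turn marker
    return prompt[:-10]

print(prompt_from_messages([{'role': 'user', 'content': 'What is inertia?'}]))
# -> <|start_header_id|>user<|end_header_id|>
#
#    What is inertia?
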
@@ -21,54 +21,64 @@ llm = Llama.from_pretrained(
     verbose=False
 )
 
-# Initialize chat history
-messages = [
-    {
-        'role': 'system',
-        'content': 'You are a professional physics master. Answer physics questions directly without using any external resources.'
-    }
-]
+# Set up Streamlit App Layout
+st.title("Physics Master Chatbot")
+st.markdown("Ask **Physics Master** any physics-related question.")
+
+# Initialize chat history in session state
+if 'messages' not in st.session_state:
+    st.session_state.messages = [
+        {
+            'role': 'system',
+            'content': 'You are a professional physics master. Answer physics questions directly without using any external resources.'
+        }
+    ]
+    st.session_state.chat_time = time.time()
+
+# Display chat history
+for message in st.session_state.messages:
+    if message['role'] == 'user':
+        st.write(f"**You:** {message['content']}")
+    else:
+        st.write(f"**Physics Master:** {message['content']}")
 
-# Function to handle user input and generate a response
-def chat_with_physics_master(user_input):
-    global messages # Ensure we can modify the global messages variable
+# Use a form to manage user input and submission
+with st.form(key="input_form", clear_on_submit=True):
+    user_input = st.text_input("Ask a question", key="user_input")
+    submit_button = st.form_submit_button(label="Send")
 
+if submit_button and user_input:
     # Append user message
     user_message = {'role': 'user', 'content': user_input}
-    messages.append(user_message)
+    st.session_state.messages.append(user_message)
 
     # Prepare to get the response from Physics Master
+    st.write('Physics Master is thinking...')
+
+    # Initialize an empty string to accumulate the response
     full_response = ""
 
     # Fetch response tokens and accumulate them
     response = llm.create_chat_completion(
-        messages=messages,
+        messages=st.session_state.messages,
         stream=True
     )
 
     for chunk in response:
         delta = chunk['choices'][0]['delta']
         if 'role' in delta:
-            messages.append({'role': delta['role'], 'content': ''})
+            st.session_state.messages.append({'role': delta['role'], 'content': ''})
         elif 'content' in delta:
             token = delta['content']
             # Accumulate tokens into the full response
             full_response += token
 
     # Once the full response is received, append it to the chat history
-    messages[-1]['content'] = full_response
-
-    # Return the entire chat history for display
-    return [(msg['role'], msg['content']) for msg in messages]
+    st.session_state.messages[-1]['content'] = full_response
 
-# Gradio interface
-iface = gr.Interface(
-    fn=chat_with_physics_master,
-    inputs=gr.inputs.Textbox(label="Ask a question"),
-    outputs=gr.outputs.Chatbox(label="Chat History"),
-    title="Physics Master Chatbot",
-    description="Ask **Physics Master** any physics-related question.",
-)
+    # Display the full response as a paragraph
+    st.write(f"**Physics Master:** {full_response}")
 
-# Launch the Gradio app
-iface.launch()
+# Save the chat history to a JSON file
+with open('chat_history.json', 'w', encoding='utf8') as file:
+    json.dump(st.session_state.messages, file, indent=4)
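
For context: lines 17-20 of app.py, which the diff leaves unchanged and collapsed between the hunks, contain the llm = Llama.from_pretrained( call and its arguments. With llama-cpp-python, Llama.from_pretrained downloads a GGUF model from the Hugging Face Hub; here is a hypothetical sketch with placeholder values, since the actual repository and filename are not shown in this diff:

from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2-0.5B-Instruct-GGUF",  # placeholder repo; the real one is elided by the diff
    filename="*q8_0.gguf",                    # placeholder glob for the quantized GGUF file
    verbose=False,
)

Since this commit turns app.py into a Streamlit script, the app is now launched with "streamlit run app.py" rather than by executing the file directly.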