Tonic committed on
Commit ef5351a · unverified · 1 parent: 8ed5286

adds MCP server, ssr_mode=False, refactors the global variables

Files changed (1): app.py (+22 −37)
app.py CHANGED
@@ -5,13 +5,6 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 import torch
 import json
 
-LEAN4_DEFAULT_HEADER = (
-    "import Mathlib\n"
-    "import Aesop\n\n"
-    "set_option maxHeartbeats 0\n\n"
-    "open BigOperators Real Nat Topology Rat\n"
-)
-
 title = "# 🙋🏻‍♂️Welcome to 🌟Tonic's 🌕💉👨🏻‍🔬Moonshot Math"
 
 description = """
@@ -39,27 +32,16 @@ joinus = """
 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [MultiTonic](https://github.com/MultiTonic)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
 """
 
-# Build the initial system prompt
 SYSTEM_PROMPT = "You are an expert in mathematics and Lean 4."
 
-# Helper to build a Lean4 code block
-def build_formal_block(formal_statement, informal_prefix=""):
-    return (
-        f"{LEAN4_DEFAULT_HEADER}\n"
-        f"{informal_prefix}\n"
-        f"{formal_statement}"
-    )
+LEAN4_DEFAULT_HEADER = (
+    "import Mathlib\n"
+    "import Aesop\n\n"
+    "set_option maxHeartbeats 0\n\n"
+    "open BigOperators Real Nat Topology Rat\n"
+)
 
-# Helper to extract the first Lean4 code block from text
-def extract_lean4_code(text):
-    code_block = re.search(r"```lean4(.*?)(```|$)", text, re.DOTALL)
-    if code_block:
-        code = code_block.group(1)
-        lines = [line for line in code.split('\n') if line.strip()]
-        return '\n'.join(lines)
-    return text.strip()
 
-# Example problems
 unimath1 = """Goal:
 X : UU
 Y : UU
@@ -88,6 +70,21 @@ unimath4 = """Goal:
 
 additional_info_prompt = "/-Explain using mathematics-/\n"
 
+def build_formal_block(formal_statement, informal_prefix=""):
+    return (
+        f"{LEAN4_DEFAULT_HEADER}\n"
+        f"{informal_prefix}\n"
+        f"{formal_statement}"
+    )
+
+def extract_lean4_code(text):
+    code_block = re.search(r"```lean4(.*?)(```|$)", text, re.DOTALL)
+    if code_block:
+        code = code_block.group(1)
+        lines = [line for line in code.split('\n') if line.strip()]
+        return '\n'.join(lines)
+    return text.strip()
+
 examples = [
     [unimath1, additional_info_prompt, 1234],
     [unimath2, additional_info_prompt, 1234],
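The two helpers are re-added here, below the example problems. A minimal usage sketch, assuming both definitions above are in scope; the statement and model reply are made up for illustration. Note that extract_lean4_code relies on the re module, so app.py presumably imports re near the top (not visible in this diff):

# Hypothetical inputs, just to show what the two helpers produce.
prompt_block = build_formal_block(
    "theorem my_add_comm (a b : Nat) : a + b = b + a := by sorry",
    informal_prefix="/- Addition on Nat is commutative. -/\n",
)
# prompt_block = LEAN4_DEFAULT_HEADER + "\n" + prefix + "\n" + statement

model_reply = "Proof:\n```lean4\ntheorem my_add_comm (a b : Nat) : a + b = b + a := by\n  omega\n```"
print(extract_lean4_code(model_reply))
# Prints the fenced lean4 body with blank lines removed:
# theorem my_add_comm (a b : Nat) : a + b = b + a := by
#   omega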
@@ -114,9 +111,7 @@ model_name = "AI-MO/Kimina-Prover-Distill-8B"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)
 
-# Set generation config
 model.generation_config = GenerationConfig.from_pretrained(model_name)
-# Ensure pad_token_id is an integer, not a list
 if isinstance(model.generation_config.eos_token_id, list):
     model.generation_config.pad_token_id = model.generation_config.eos_token_id[0]
 else:
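The hunk context cuts off at else:, so the fallback branch is not visible here. As the deleted comment said, the point is to ensure pad_token_id is a single integer even when the config stores eos_token_id as a list. A compact sketch of that normalization, assuming the else branch simply reuses the scalar value:

# Sketch: normalize pad_token_id to a scalar int, whether eos_token_id
# is stored as a list (newer configs) or as a single int.
eos = model.generation_config.eos_token_id
model.generation_config.pad_token_id = eos[0] if isinstance(eos, list) else eos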
@@ -125,7 +120,6 @@ model.generation_config.do_sample = True
 model.generation_config.temperature = 0.6
 model.generation_config.top_p = 0.95
 
-# Initialize chat history with system prompt
 def init_chat(formal_statement, informal_prefix):
     user_prompt = (
         "Think about and solve the following problem step by step in Lean 4.\n"
@@ -137,15 +131,12 @@ def init_chat(formal_statement, informal_prefix):
         {"role": "user", "content": user_prompt}
     ]
 
-# Gradio chat handler
 @spaces.GPU
 def chat_handler(user_message, informal_prefix, max_tokens, chat_history):
-    # If chat_history is empty, initialize with system and first user message
     if not chat_history or len(chat_history) < 2:
         chat_history = init_chat(user_message, informal_prefix)
         display_history = [("user", user_message)]
     else:
-        # Append new user message
         chat_history.append({"role": "user", "content": user_message})
         display_history = []
     for msg in chat_history:
@@ -153,7 +144,6 @@ def chat_handler(user_message, informal_prefix, max_tokens, chat_history):
             display_history.append(("user", msg["content"]))
         elif msg["role"] == "assistant":
             display_history.append(("assistant", msg["content"]))
-    # Format prompt using chat template
     prompt = tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
     attention_mask = torch.ones_like(input_ids)
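Here chat_history is a plain list of role/content dicts (seeded by init_chat with SYSTEM_PROMPT and the first user turn), and apply_chat_template renders it into the model's prompt string before tokenization. A standalone sketch of this step with an illustrative message list:

# Illustrative messages; in app.py they come from init_chat()/chat_handler().
messages = [
    {"role": "system", "content": "You are an expert in mathematics and Lean 4."},
    {"role": "user", "content": "Think about and solve the following problem step by step in Lean 4.\n..."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)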
@@ -166,14 +156,10 @@ def chat_handler(user_message, informal_prefix, max_tokens, chat_history):
         top_p=model.generation_config.top_p,
     )
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    # Extract only the new assistant message (after the prompt)
     new_response = result[len(prompt):].strip()
-    # Add assistant message to chat history
     chat_history.append({"role": "assistant", "content": new_response})
     display_history.append(("assistant", new_response))
-    # Extract Lean4 code
     code = extract_lean4_code(new_response)
-    # Prepare output
     output_data = {
         "model_input": prompt,
         "model_output": result,
@@ -184,7 +170,6 @@ def chat_handler(user_message, informal_prefix, max_tokens, chat_history):
 
 def main():
     with gr.Blocks() as demo:
-        # Title and Model Description
         gr.Markdown("""# 🙋🏻‍♂️Welcome to 🌟Tonic's 🌕💉👨🏻‍🔬Moonshot Math""")
         with gr.Row():
             with gr.Column():
@@ -213,4 +198,4 @@ def main():
     demo.launch()
 
 if __name__ == "__main__":
-    main()
+    main(ssr_mode=False, mcp_server=True)
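This last hunk is what the commit message refers to. Gradio's Blocks.launch() does accept ssr_mode and mcp_server keyword arguments in Gradio 5.x (mcp_server=True additionally needs the gradio[mcp] extra installed). As committed, though, the flags are passed to main(), which an earlier hunk shows is defined as def main(): with no parameters, so they presumably need to be forwarded to demo.launch(). A hypothetical sketch of that wiring:

import gradio as gr

# Hypothetical signature: main() in app.py takes no parameters, so calling it
# with keywords would raise a TypeError; forwarding them is the assumed intent.
def main(ssr_mode=False, mcp_server=True):
    with gr.Blocks() as demo:
        gr.Markdown("demo UI goes here")  # stand-in for the real interface
    demo.launch(ssr_mode=ssr_mode, mcp_server=mcp_server)

if __name__ == "__main__":
    main(ssr_mode=False, mcp_server=True)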