dlflannery commited on
Commit
169dbe5
·
verified ·
1 Parent(s): 21bdc86

Update app.py

Browse files

Errors are now handled inside solve(); the flatlatex dependency has been removed.

Files changed (1) hide show
  1. app.py +57 -44
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import os
2
  import gradio as gr
3
- # import openai
4
  from numpy._core.defchararray import endswith, isdecimal
5
  from openai import OpenAI
6
  from dotenv import load_dotenv
@@ -17,8 +17,6 @@ from PIL import Image
17
  from io import BytesIO
18
  from pydantic import BaseModel
19
  import pprint
20
- import flatlatex
21
- lconv = flatlatex.converter()
22
 
23
  load_dotenv(override=True)
24
  key = os.getenv('OPENAI_API_KEY')
@@ -45,6 +43,8 @@ client = OpenAI(api_key = key)
45
 
46
  abbrevs = {'St. ' : 'Saint ', 'Mr. ': 'mister ', 'Mrs. ':'mussus ', 'Mr. ':'mister ', 'Ms. ':'mizz '}
47
 
 
 
48
  class Step(BaseModel):
49
  explanation: str
50
  output: str
@@ -55,19 +55,40 @@ class MathReasoning(BaseModel):
55
 
56
 
57
  def solve(prompt, chatType):
 
 
 
58
  if chatType == 'math':
59
  instruction = "You are a helpful math tutor. Guide the user through the solution step by step."
60
  elif chatType == "logic":
61
- instruction = "you are a helpful tutor expert in logic. Guide the user through the solution step by step"
62
- completion = client.beta.chat.completions.parse(
63
- model = 'gpt-4o-2024-08-06',
64
- messages = [
65
- {"role": "system", "content": instruction},
66
- {"role": "user", "content": prompt}
67
- ],
68
- response_format=MathReasoning,
69
- )
70
- return completion
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
  def genUsageStats(do_reset=False):
73
  result = []
@@ -199,22 +220,23 @@ def updatePassword(txt):
199
  password = txt.lower().strip()
200
  return [password, "*********"]
201
 
202
- def parse_math(txt):
203
- ref = 0
204
- loc = txt.find(r'\\(')
205
- if loc == -1:
206
- return txt
207
- while (True):
208
- loc2 = txt[ref:].find(r'\\)')
209
- if loc2 == -1:
210
- break
211
- loc = txt[ref:].find(r'\\(')
212
- if loc > -1:
213
- loc2 += 2
214
- frag = lconv.convert(txt[ref:][loc:loc2])
215
- txt = txt[:loc+ref] + frag + txt[loc2+ref:]
216
- ref = len(txt[ref:loc]) + len(frag)
217
- return txt
 
218
 
219
  def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_image_file=''):
220
  image_gen_model = 'gpt-4o-2024-08-06'
@@ -251,8 +273,8 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
251
  prompt = prompt[6:]
252
  past.append({"role":"user", "content":prompt})
253
  gen_image = (uploaded_image_file != '')
254
- if chatType in ['math', 'logic']:
255
- completion = solve(prompt, chatType)
256
  reporting_model = image_gen_model
257
  elif not gen_image:
258
  completion = client.chat.completions.create(model=gptModel,
@@ -264,20 +286,11 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
264
  reporting_model = image_gen_model
265
  if not msg == 'ok':
266
  return [past, msg, None, gptModel, uploaded_image_file]
267
- if chatType in ['math', 'logic']:
268
- dr = completion.choices[0].message.parsed.model_dump()
269
- reply = pprint.pformat(dr)
270
- # df = {'final_answer' : parse_math(dr['final_answer'])}
271
- # df['steps'] = []
272
- # for x in dr['steps']:
273
- # df['steps'].append({'explanation': parse_math(x['explanation']), 'output' : parse_math(x['output'])})
274
-
275
- # reply = pprint.pformat(df)
276
- else:
277
  reply = completion.choices[0].message.content
278
- tokens_in = completion.usage.prompt_tokens
279
- tokens_out = completion.usage.completion_tokens
280
- tokens = completion.usage.total_tokens
281
  response += "\n\nYOU: " + prompt + "\nGPT: " + reply
282
  if isBoss:
283
  response += f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}"
 
1
  import os
2
  import gradio as gr
3
+ import openai
4
  from numpy._core.defchararray import endswith, isdecimal
5
  from openai import OpenAI
6
  from dotenv import load_dotenv
 
17
  from io import BytesIO
18
  from pydantic import BaseModel
19
  import pprint
 
 
20
 
21
  load_dotenv(override=True)
22
  key = os.getenv('OPENAI_API_KEY')
 
43
 
44
  abbrevs = {'St. ' : 'Saint ', 'Mr. ': 'mister ', 'Mrs. ':'mussus ', 'Mr. ':'mister ', 'Ms. ':'mizz '}
45
 
46
+ special_chat_types = ['math', 'logic']
47
+
48
  class Step(BaseModel):
49
  explanation: str
50
  output: str
 
55
 
56
 
57
def solve(prompt, chatType):
    """Ask the structured-output model to walk a user through a math/logic solution.

    Args:
        prompt: The user's question, as plain text.
        chatType: 'math' or 'logic'; selects the system instruction sent to the model.

    Returns:
        A tuple (response, tokens_in, tokens_out, tokens). `response` is the
        pretty-printed dump of the parsed MathReasoning result, the model's
        refusal text, or an error message. The token counts are 0 when the
        request failed before usage data was available.
    """
    tokens_in = 0
    tokens_out = 0
    tokens = 0
    if chatType == 'math':
        instruction = "You are a helpful math tutor. Guide the user through the solution step by step."
    elif chatType == "logic":
        instruction = "you are an expert in logic and reasoning. Guide the user through the solution step by step"
    else:
        # Defensive default: the original left `instruction` unbound for an
        # unexpected chatType, so the user saw a NameError message instead of help.
        instruction = "You are a helpful tutor. Guide the user through the solution step by step."
    try:
        completion = client.beta.chat.completions.parse(
            model = 'gpt-4o-2024-08-06',
            messages = [
                {"role": "system", "content": instruction},
                {"role": "user", "content": prompt}
            ],
            response_format=MathReasoning,
            max_tokens = 2000
        )
        tokens_in = completion.usage.prompt_tokens
        tokens_out = completion.usage.completion_tokens
        tokens = completion.usage.total_tokens
        msg = completion.choices[0].message
        if msg.parsed:
            dr = msg.parsed.model_dump()
            response = pprint.pformat(dr)
        elif msg.refusal:
            response = msg.refusal
        else:
            # Neither parsed output nor an explicit refusal: report it rather
            # than raising UnboundLocalError on return (bug in the original).
            response = 'No response received from model'
    except Exception as e:
        # isinstance (not `type(e) ==`) so subclasses are recognized too.
        if isinstance(e, openai.LengthFinishReasonError):
            response = 'Too many tokens'
        else:
            response = str(e)
    return (response, tokens_in, tokens_out, tokens)
92
 
93
  def genUsageStats(do_reset=False):
94
  result = []
 
220
  password = txt.lower().strip()
221
  return [password, "*********"]
222
 
223
+ # def parse_math(txt):
224
+ # ref = 0
225
+ # loc = txt.find(r'\(')
226
+ # if loc == -1:
227
+ # return txt
228
+ # while (True):
229
+ # loc2 = txt[ref:].find(r'\)')
230
+ # if loc2 == -1:
231
+ # break
232
+ # loc = txt[ref:].find(r'\(')
233
+ # if loc > -1:
234
+ # loc2 += 2
235
+ # slice = txt[ref:][loc:loc2]
236
+ # frag = lconv.convert(slice)
237
+ # txt = txt[:loc+ref] + frag + txt[loc2+ref:]
238
+ # ref = len(txt[ref:loc]) + len(frag)
239
+ # return txt
240
 
241
  def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_image_file=''):
242
  image_gen_model = 'gpt-4o-2024-08-06'
 
273
  prompt = prompt[6:]
274
  past.append({"role":"user", "content":prompt})
275
  gen_image = (uploaded_image_file != '')
276
+ if chatType in special_chat_types:
277
+ (reply, tokens_in, tokens_out, tokens) = solve(prompt, chatType)
278
  reporting_model = image_gen_model
279
  elif not gen_image:
280
  completion = client.chat.completions.create(model=gptModel,
 
286
  reporting_model = image_gen_model
287
  if not msg == 'ok':
288
  return [past, msg, None, gptModel, uploaded_image_file]
289
+ if not chatType in special_chat_types:
 
 
 
 
 
 
 
 
 
290
  reply = completion.choices[0].message.content
291
+ tokens_in = completion.usage.prompt_tokens
292
+ tokens_out = completion.usage.completion_tokens
293
+ tokens = completion.usage.total_tokens
294
  response += "\n\nYOU: " + prompt + "\nGPT: " + reply
295
  if isBoss:
296
  response += f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}"