dlflannery committed on
Commit
1e70f16
·
verified ·
1 Parent(s): cd06238

Update app.py

Browse files

4 deepseek models via together.ai

Files changed (1) hide show
  1. app.py +53 -19
app.py CHANGED
@@ -32,6 +32,7 @@ users = os.getenv('LOGNAME')
32
  unames = users.split(',')
33
  pwds = os.getenv('PASSWORD')
34
  pwdList = pwds.split(',')
 
35
 
36
  site = os.getenv('SITE')
37
  if site == 'local':
@@ -660,8 +661,25 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
660
  return [past, 'Stock data file created', None, gptModel, uploaded_image_file, plot]
661
  if user_window in unames and pwd_window == pwdList[unames.index(user_window)]:
662
  chatType = 'normal'
 
663
  prompt = prompt.strip()
664
- if prompt.lower().startswith('solve'):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
665
  prompt = 'How do I solve ' + prompt[5:] + ' Do not use Latex for math expressions.'
666
  chatType = 'math'
667
  elif prompt.lower().startswith('puzzle'):
@@ -673,9 +691,16 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
673
  (reply, tokens_in, tokens_out, tokens) = solve(prompt, chatType)
674
  reporting_model = image_gen_model
675
  elif not gen_image:
676
- completion = Client().chat.completions.create(model=gptModel,
 
 
 
 
 
 
 
677
  messages=past)
678
- reporting_model = gptModel
679
  else:
680
  (completion, msg) = analyze_image(user_window, image_gen_model, prompt)
681
  uploaded_image_file= ''
@@ -684,6 +709,14 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
684
  return [past, msg, None, gptModel, uploaded_image_file, plot]
685
  if not chatType in special_chat_types:
686
  reply = completion.choices[0].message.content
 
 
 
 
 
 
 
 
687
  tokens_in = completion.usage.prompt_tokens
688
  tokens_out = completion.usage.completion_tokens
689
  tokens = completion.usage.total_tokens
@@ -692,22 +725,23 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
692
  response += md(f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}")
693
  if tokens > 40000:
694
  response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE RESTART CONVERSATION SOON."
695
- past.append({"role":"assistant", "content": reply})
696
- accessOk = False
697
- for i in range(3):
698
- try:
699
- dataFile = new_func(user_window)
700
- with open(dataFile, 'a') as f:
701
- m = '4o'
702
- if 'mini' in reporting_model:
703
- m = '4omini'
704
- f.write(f'{user_window}:{tokens_in}/{tokens_out}-{m}\n')
705
- accessOk = True
706
- break
707
- except Exception as e:
708
- sleep(3)
709
- if not accessOk:
710
- response += f"\nDATA LOG FAILED, path = {dataFile}"
 
711
  return [past, response , None, gptModel, uploaded_image_file, plot]
712
  else:
713
  return [[], "User name and/or password are incorrect", prompt, gptModel, uploaded_image_file, plot]
 
32
  unames = users.split(',')
33
  pwds = os.getenv('PASSWORD')
34
  pwdList = pwds.split(',')
35
+ DEEPSEEK_KEY=os.getenv('DEEPSEEK_KEY')
36
 
37
  site = os.getenv('SITE')
38
  if site == 'local':
 
661
  return [past, 'Stock data file created', None, gptModel, uploaded_image_file, plot]
662
  if user_window in unames and pwd_window == pwdList[unames.index(user_window)]:
663
  chatType = 'normal'
664
+ deepseek = False
665
  prompt = prompt.strip()
666
+ if prompt.lower().startswith('dsr1 '):
667
+ deepseek = True
668
+ ds_model = 'deepseek-ai/DeepSeek-R1'
669
+ prompt = prompt[5:]
670
+ elif prompt.lower().startswith('ds1.5 '):
671
+ deepseek = True
672
+ ds_model = 'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B'
673
+ prompt = prompt[6:]
674
+ elif prompt.lower().startswith('ds14 '):
675
+ deepseek = True
676
+ ds_model = 'deepseek-ai/DeepSeek-R1-Distill-Qwen-14B'
677
+ prompt = prompt[5:]
678
+ elif prompt.lower().startswith('ds70 '):
679
+ deepseek = True
680
+ ds_model = 'deepseek-ai/DeepSeek-R1-Distill-Llama-70B'
681
+ prompt = prompt[5:]
682
+ elif prompt.lower().startswith('solve'):
683
  prompt = 'How do I solve ' + prompt[5:] + ' Do not use Latex for math expressions.'
684
  chatType = 'math'
685
  elif prompt.lower().startswith('puzzle'):
 
691
  (reply, tokens_in, tokens_out, tokens) = solve(prompt, chatType)
692
  reporting_model = image_gen_model
693
  elif not gen_image:
694
+ if deepseek:
695
+ client = OpenAI(api_key=DEEPSEEK_KEY, base_url='https://api.together.xyz/v1')
696
+ completion = client.chat.completions.create(
697
+ model= ds_model,
698
+ messages=past)
699
+ reporting_model='deepseek'
700
+ else:
701
+ completion = Client().chat.completions.create(model=gptModel,
702
  messages=past)
703
+ reporting_model = gptModel
704
  else:
705
  (completion, msg) = analyze_image(user_window, image_gen_model, prompt)
706
  uploaded_image_file= ''
 
709
  return [past, msg, None, gptModel, uploaded_image_file, plot]
710
  if not chatType in special_chat_types:
711
  reply = completion.choices[0].message.content
712
+ final_text = reply
713
+ if deepseek:
714
+ loc1 = reply.find('<think>')
715
+ if loc1 > -1:
716
+ loc2 = reply.find('</think>')
717
+ if loc2 > loc1:
718
+ final_text = reply[loc2 + 8:]
719
+ reply = reply.replace('<think>','Thinking:\n').replace('</think>','Done thinking:\n')
720
  tokens_in = completion.usage.prompt_tokens
721
  tokens_out = completion.usage.completion_tokens
722
  tokens = completion.usage.total_tokens
 
725
  response += md(f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}")
726
  if tokens > 40000:
727
  response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE RESTART CONVERSATION SOON."
728
+ past.append({"role":"assistant", "content": final_text})
729
+ if not deepseek:
730
+ accessOk = False
731
+ for i in range(3):
732
+ try:
733
+ dataFile = new_func(user_window)
734
+ with open(dataFile, 'a') as f:
735
+ m = '4o'
736
+ if 'mini' in reporting_model:
737
+ m = '4omini'
738
+ f.write(f'{user_window}:{tokens_in}/{tokens_out}-{m}\n')
739
+ accessOk = True
740
+ break
741
+ except Exception as e:
742
+ sleep(3)
743
+ if not accessOk:
744
+ response += f"\nDATA LOG FAILED, path = {dataFile}"
745
  return [past, response , None, gptModel, uploaded_image_file, plot]
746
  else:
747
  return [[], "User name and/or password are incorrect", prompt, gptModel, uploaded_image_file, plot]