amirgame197 committed (verified)
Commit 0f2cb6b · 1 Parent(s): bcb52b4

Update app.py

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -41,7 +41,7 @@ def format_prompt(message, history, cust_p):
     prompt+=cust_p.replace("USER_INPUT",message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
+def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p,translate_fa):
     #token max=8192
     if(len(prompt) > 2000):
         translatedtext1 = GoogleTranslator(source='auto', target='en').translate(prompt[0:2000])
@@ -90,6 +90,8 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
     for response in stream:
         output += response.token.text
         yield [(prompt,output)],memory
+    if(translate_fa):
+        output = GoogleTranslator(source='auto', target='en').translate(output)
     history.append((prompt,output))
     memory.append((prompt,output))
     yield history,memory
@@ -128,6 +130,7 @@ with gr.Blocks() as app:
             custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
         with gr.Column(scale=1):
             with gr.Group():
+                translate_fa = gr.Checkbox(label="Translate to Persian", value=True)
                 rand = gr.Checkbox(label="Random Seed", value=True)
                 seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                 tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")
@@ -140,8 +143,8 @@ with gr.Blocks() as app:
     client_choice.change(load_models,client_choice,[chat_b])
     app.load(load_models,client_choice,[chat_b])
 
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])
+    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt,translate_fa],[chat_b,memory])
 
     clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
     app.queue(default_concurrency_limit=10).launch()
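
For context on the new code path: chat_inf now takes a translate_fa flag wired to the "Translate to Persian" checkbox, and once streaming finishes it runs the reply through deep-translator's GoogleTranslator before appending it to history and memory. The minimal sketch below isolates that step. It assumes the deep-translator package (the same one the Space's existing GoogleTranslator calls rely on); the helper name translate_reply is illustrative, not from app.py, and it uses target='fa' on the assumption that Persian output is what the checkbox intends, whereas the committed hunk keeps target='en'.

# Sketch only: the post-stream translation step, assuming the deep-translator package.
# translate_reply is a hypothetical helper, not a function defined in app.py.
from deep_translator import GoogleTranslator

def translate_reply(output: str, translate_fa: bool) -> str:
    # Translate the finished model reply only when the checkbox is ticked.
    # target='fa' assumes Persian is the intended output language;
    # the committed hunk above uses target='en'.
    if translate_fa:
        return GoogleTranslator(source='auto', target='fa').translate(output)
    return output

# Example usage:
# translate_reply("Hello! How can I help you today?", True)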