Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from gradio_client import Client
 from huggingface_hub import InferenceClient
 import random
+from deep_translator import GoogleTranslator
 ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
 
 models=[
@@ -121,6 +122,7 @@ with gr.Blocks() as app:
             custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
         with gr.Column(scale=1):
             with gr.Group():
+                translate_fa = gr.Checkbox(label="Translate to Persian", value=True)
                 rand = gr.Checkbox(label="Random Seed", value=True)
                 seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                 tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")
@@ -132,7 +134,12 @@ with gr.Blocks() as app:
 
     client_choice.change(load_models,client_choice,[chat_b])
     app.load(load_models,client_choice,[chat_b])
-
+    if(len(inp) > 2000):
+        translatedtext1 = GoogleTranslator(source='auto', target='en').translate(inp[0:2000])
+        translatedtext2 = GoogleTranslator(source='auto', target='en').translate(inp[2000:(len(inp))])
+        inp = translatedtext1 + translatedtext2
+    else:
+        inp = GoogleTranslator(source='auto', target='en').translate(inp)
     chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
     go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
 
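Note on this commit: the new translation block runs at UI-construction time inside `with gr.Blocks() as app:`, where `inp` is a `gr.Textbox` component, not a string. `len(inp)` and `GoogleTranslator(...).translate(inp)` therefore raise an exception as soon as the app starts, and reassigning `inp` to a string would break the `inp.submit(...)` wiring on the next line. The `translate_fa` checkbox added to the UI is also never read anywhere in the commit (and although its label says Persian, the added code translates the prompt to English for the model). Below is a minimal sketch of how the same translation could instead run as the first step of each event chain; the helper name `translate_input` and the use of `translate_fa` to gate translation are assumptions, not part of the original app.

# Sketch only: reuses components and handlers already defined in app.py
# (inp, btn, rand, seed, translate_fa, check_rand, chat_inf, ...); only
# translate_input is new and its name is hypothetical.
from deep_translator import GoogleTranslator

def translate_input(text, do_translate):
    # Translate the user's prompt to English before it reaches the model.
    # Long prompts are split into 2000-character chunks, matching the split
    # used in the commit, to stay under the translation backend's request limit.
    if not do_translate or not text:
        return text
    translator = GoogleTranslator(source='auto', target='en')
    chunks = [text[i:i + 2000] for i in range(0, len(text), 2000)]
    return "".join(translator.translate(chunk) for chunk in chunks)

# Inside `with gr.Blocks() as app:`, replacing the existing event wiring:
# translate first, then pick the seed, then run inference.
chat_sub = (inp.submit(translate_input, [inp, translate_fa], inp)
            .then(check_rand, [rand, seed], seed)
            .then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp,
                             tokens, top_p, rep_p, chat_mem, custom_prompt],
                  [chat_b, memory]))
go = (btn.click(translate_input, [inp, translate_fa], inp)
      .then(check_rand, [rand, seed], seed)
      .then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp,
                       tokens, top_p, rep_p, chat_mem, custom_prompt],
            [chat_b, memory]))

Writing the translated text back into `inp` (the third argument of the first step) means `chat_inf` still receives its inputs exactly as before, so the rest of the app would not need to change under this approach.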