Update app.py
app.py CHANGED
@@ -26,6 +26,10 @@ def translate_to_english(prompt):
     translated_prompt = GoogleTranslator(source='auto', target='en').translate(prompt)
     return translated_prompt
 
+def translate_to_persian(response):
+    translated_response = GoogleTranslator(source='auto', target='fa').translate(response)
+    return translated_response
+
 def load_models(inp):
     if VERBOSE == True:
         print(type(inp))
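The hunk above adds a second helper that mirrors the existing translate_to_english: it auto-detects the input language and renders the model's reply in Persian ('fa'). A minimal standalone sketch of the pair, assuming the GoogleTranslator seen here comes from the deep-translator package (the import is outside this diff):

from deep_translator import GoogleTranslator

def translate_to_english(prompt):
    # Auto-detect the source language and translate the prompt to English
    # before it is sent to the model.
    return GoogleTranslator(source='auto', target='en').translate(prompt)

def translate_to_persian(response):
    # Translate the model's (English) output to Persian ('fa').
    return GoogleTranslator(source='auto', target='fa').translate(response)

if __name__ == "__main__":
    print(translate_to_english("سلام دنیا"))          # roughly "Hello World"
    print(translate_to_persian("Hello, how are you?"))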
@@ -44,7 +48,7 @@ def format_prompt(message, history, cust_p):
     prompt += cust_p.replace("USER_INPUT", message)
     return prompt
 
-def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt):
+def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian):
     hist_len = 0
     client = clients[int(client_choice) - 1]
     if not history:
@@ -65,9 +69,9 @@ def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp,
         max_new_tokens=tokens,
     )
     if system_prompt:
-        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0 - chat_mem:],
+        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0 - chat_mem:], custom_prompt)
     else:
-        formatted_prompt = format_prompt(prompt, memory[0 - chat_mem:],
+        formatted_prompt = format_prompt(prompt, memory[0 - chat_mem:], custom_prompt)
 
     translated_prompt = translate_to_english(formatted_prompt)
 
@@ -79,6 +83,8 @@ def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp,
     output = ""
     for response in stream:
         output += response.token.text
+        if translate_to_persian:
+            output = translate_to_persian(output)
         yield [(prompt, output)], memory
     history.append((prompt, output))
     memory.append((prompt, output))
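One thing worth flagging in the hunk above: inside chat_inf the checkbox value arrives through a parameter that is also named translate_to_persian, so it shadows the helper function of the same name, and translate_to_persian(output) ends up calling a bool (a TypeError at runtime). A small sketch of the same streaming loop with the flag under a different, illustrative name (to_persian; the wrapper function name is also just for the sketch), applying the translation only to the text that is yielded:

def stream_and_translate(stream, prompt, memory, to_persian):
    # Accumulate the streamed tokens; optionally yield a Persian rendering
    # while keeping the raw English text in memory.
    output = ""
    for response in stream:
        output += response.token.text
        shown = translate_to_persian(output) if to_persian else output
        yield [(prompt, shown)], memory
    memory.append((prompt, output))

Translating the growing buffer on every token also issues one translation request per streamed token; translating once after the loop finishes (or every N tokens) would keep the toggle while avoiding most of that overhead.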
@@ -115,6 +121,7 @@ with gr.Blocks() as app:
             custom_prompt = gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=5, value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
         with gr.Column(scale=1):
             with gr.Group():
+                translate_to_persian_checkbox = gr.Checkbox(label="Translate to Persian", value=True)
                 rand = gr.Checkbox(label="Random Seed", value=True)
                 seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
                 tokens = gr.Slider(label="Max new tokens", value=1600, minimum=0, maximum=8000, step=64, interactive=True, visible=True, info="The maximum number of tokens")
@@ -126,8 +133,8 @@ with gr.Blocks() as app:
     client_choice.change(load_models, client_choice, [chat_b])
     app.load(load_models, client_choice, [chat_b])
 
-    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], [chat_b, memory])
-    go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], [chat_b, memory])
+    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox], [chat_b, memory])
+    go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt, translate_to_persian_checkbox], [chat_b, memory])
 
     clear_btn.click(clear_fn, None, [inp, sys_inp, chat_b, memory])
 app.queue(default_concurrency_limit=10).launch()
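On the UI side, the new gr.Checkbox is simply appended to the input lists of both event chains, so chat_inf receives its current value as a plain bool on every submit or click. A trimmed, self-contained sketch of that wiring under the same assumption (the handler here is a stand-in; the real app passes the full input list shown above to chat_inf):

import gradio as gr

def demo_chat(prompt, history, to_persian):
    # Stand-in handler: the checkbox arrives as a bool, last in the input list.
    reply = f"(fa) {prompt}" if to_persian else prompt
    return (history or []) + [(prompt, reply)], ""

with gr.Blocks() as app:
    chat_b = gr.Chatbot()
    inp = gr.Textbox(label="Prompt")
    translate_to_persian_checkbox = gr.Checkbox(label="Translate to Persian", value=True)
    inp.submit(demo_chat, [inp, chat_b, translate_to_persian_checkbox], [chat_b, inp])

app.queue(default_concurrency_limit=10).launch()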