Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,5 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
-
import os
|
4 |
import time
|
5 |
|
6 |
# Load model and tokenizer
|
@@ -32,11 +31,14 @@ def add_text(history, text):
|
|
32 |
history = history + [(text, None)]
|
33 |
return history, gr.Textbox(value="", interactive=False)
|
34 |
|
35 |
-
def
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
40 |
yield history
|
41 |
|
42 |
with gr.Blocks() as demo:
|
@@ -44,24 +46,25 @@ with gr.Blocks() as demo:
|
|
44 |
[],
|
45 |
elem_id="chatbot",
|
46 |
bubble_full_width=False,
|
|
|
47 |
)
|
48 |
|
49 |
with gr.Row():
|
50 |
-
|
51 |
scale=4,
|
52 |
show_label=False,
|
53 |
-
placeholder="Enter text and press enter
|
54 |
container=False,
|
55 |
)
|
56 |
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
|
61 |
-
txt_msg =
|
62 |
-
bot, chatbot,
|
63 |
)
|
64 |
-
txt_msg.then(lambda: gr.Textbox(interactive=True), None, [
|
65 |
|
66 |
chatbot.like(print_like_dislike, None, None)
|
67 |
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
3 |
import time
|
4 |
|
5 |
# Load model and tokenizer
|
|
|
31 |
history = history + [(text, None)]
|
32 |
return history, gr.Textbox(value="", interactive=False)
|
33 |
|
34 |
+
def add_file(history, file):
    """Append an uploaded file to the chat history as a new user turn.

    The file is stored Gradio-style as a one-element tuple ``(file.name,)``
    paired with ``None`` (no bot reply yet). A new list is returned; the
    input ``history`` is not mutated.
    """
    file_turn = ((file.name,), None)
    return history + [file_turn]
|
37 |
+
|
38 |
+
def bot(history, max_len, min_len, temp):
    """Produce the model's reply for the most recent user turn.

    Reads the prompt from the last history entry, generates a response via
    ``generate_text`` with the slider-controlled length/temperature settings,
    writes it back into that entry in place, and yields the updated history
    (a generator, as Gradio streaming callbacks expect).
    """
    last_prompt = history[-1][0]
    reply = generate_text(
        last_prompt,
        max_length=max_len,
        min_length=min_len,
        temperature=temp,
    )
    history[-1] = (last_prompt, reply)
    yield history
|
43 |
|
44 |
with gr.Blocks() as demo:
|
|
|
46 |
[],
|
47 |
elem_id="chatbot",
|
48 |
bubble_full_width=False,
|
49 |
+
avatar_images=(None, None), # You can add an avatar image if needed
|
50 |
)
|
51 |
|
52 |
with gr.Row():
|
53 |
+
prompt_txt = gr.Textbox(
|
54 |
scale=4,
|
55 |
show_label=False,
|
56 |
+
placeholder="Enter text and press enter",
|
57 |
container=False,
|
58 |
)
|
59 |
|
60 |
+
max_len_slider = gr.Slider(0, 2048, 100, label="Max Length")
|
61 |
+
min_len_slider = gr.Slider(0, 2048, 20, label="Min Length")
|
62 |
+
temp_slider = gr.Slider(0.1, 2.0, 1.0, label="Temperature")
|
63 |
|
64 |
+
txt_msg = prompt_txt.submit(add_text, [chatbot, prompt_txt], [chatbot, prompt_txt], queue=False).then(
|
65 |
+
bot, chatbot, max_len_slider, min_len_slider, temp_slider, api_name="bot_response"
|
66 |
)
|
67 |
+
txt_msg.then(lambda: gr.Textbox(interactive=True), None, [prompt_txt], queue=False)
|
68 |
|
69 |
chatbot.like(print_like_dislike, None, None)
|
70 |
|