Update app.py
app.py CHANGED
@@ -1,19 +1,18 @@
 import gradio as gr
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import threading
-import torch
 
-# Detect the device automatically
+# Detect the device automatically
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-#
+# Initialize the model and tokenizer
 model_name = "lambdaindie/lambda-1v-1B"
 model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 stop_flag = {"stop": False}
 
-# Response function
 def respond(prompt, history):
     stop_flag["stop"] = False
 
@@ -22,7 +21,6 @@ def respond(prompt, history):
 
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
-    # Start the generation thread
     generation_thread = threading.Thread(
         target=model.generate,
         kwargs={
@@ -43,29 +41,26 @@ def respond(prompt, history):
         if stop_flag["stop"]:
             return "", history
         reasoning += new_text
-        yield "",
+        yield "", history[:-1] + [(prompt, f"<div class='final-answer'>{reasoning}</div>")]
 
-# Function to stop the generation
 def stop_generation():
     stop_flag["stop"] = True
 
-# Gradio interface
 with gr.Blocks(css="""
-#chatbot, .gr-markdown, .gr-button, .gr-textbox {
-    font-family: 'JetBrains Mono', monospace !important;
-    font-size: 11px !important;
-}
-.final-answer {
-    background-color: #1e1e1e;
-    color: #ffffff;
-    padding: 10px;
-    border-left: 4px solid #4caf50;
-    font-family: 'JetBrains Mono', monospace !important;
-    white-space: pre-wrap;
-    font-size: 11px !important;
+    #chatbot, .gr-markdown, .gr-button, .gr-textbox {
+        font-family: 'JetBrains Mono', monospace !important;
+        font-size: 11px !important;
+    }
+    .final-answer {
+        background-color: #1e1e1e;
+        color: #ffffff;
+        padding: 10px;
+        border-left: 4px solid #4caf50;
+        font-family: 'JetBrains Mono', monospace !important;
+        white-space: pre-wrap;
+        font-size: 11px !important;
 }
 """) as demo:
-    gr.Markdown('<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono&display=swap" rel="stylesheet">')
     gr.Markdown("## λambdAI — Reasoning Chat")
 
     chatbot = gr.Chatbot(elem_id="chatbot")
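The substantive fix in this commit is the yield inside respond(): the old code yielded an incomplete tuple (yield "",), so the streamed reasoning never reached the Chatbot. The background generate() thread is what lets respond() yield partial histories while generation is still running. Below is a minimal, self-contained sketch of that thread-plus-TextIteratorStreamer pattern; the tiny stand-in model name and the simplified history handling are assumptions for illustration, not part of this Space.

```python
# Minimal sketch of the streaming pattern app.py relies on (stand-in model).
import threading

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "sshleifer/tiny-gpt2"  # stand-in; the Space uses lambdaindie/lambda-1v-1B
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def stream_reply(prompt, history):
    """Yield (textbox, history) pairs as tokens arrive, like the fixed respond()."""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generate() runs in a background thread; this thread drains the streamer.
    threading.Thread(
        target=model.generate,
        kwargs={**inputs, "max_new_tokens": 64, "streamer": streamer},
    ).start()
    history = history + [(prompt, "")]
    reply = ""
    for new_text in streamer:  # blocks until the generation thread emits text
        reply += new_text
        # Replace the last (prompt, partial) pair on each step, as the new yield does.
        yield "", history[:-1] + [(prompt, reply)]
```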
|