Update app.py
app.py
CHANGED
@@ -2,14 +2,30 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
+# Check whether a GPU is available (Zero-GPU)
+if torch.cuda.is_available():
+    device = "cuda"  # Use the Zero GPU
+    print("Zero-GPU detected. Using the GPU to speed up inference.")
+else:
+    device = "cpu"  # Use the CPU if no GPU is available
+    print("No GPU detected. Using the CPU.")
+
 # Load a smaller model for code generation
 model_name = "Qwen/Qwen2.5-Coder-1.5B-Instruct"
+print("Loading model...")
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
-    torch_dtype=torch.float16  # Use float16 to save memory
+    torch_dtype=torch.float16,  # Use float16 to save memory
+    device_map="auto" if device == "cuda" else None  # Automatically place on the GPU when available
 )
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
+# Explicitly move the model to the GPU if needed
+if device == "cuda":
+    model.to("cuda")
+
+print("Model loaded successfully.")
+
 def generate_code(prompt):
     """Generate code based on the user's prompt."""
     messages = [
@@ -18,16 +34,39 @@ def generate_code(prompt):
         {"role": "assistant", "content": ""}
     ]
     text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    model_inputs = tokenizer([text], return_tensors="pt")
+    model_inputs = tokenizer([text], return_tensors="pt").to(device)  # Move the inputs to the matching device
     generated_ids = model.generate(
         **model_inputs,
-        max_new_tokens=
+        max_new_tokens=128,  # Fewer tokens for faster responses and lower memory use
         do_sample=True,
         temperature=0.7
     )
     response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
     return response
 
+def extract_code(output):
+    """Extract HTML, CSS, and JavaScript from the generated text."""
+    html_code = ""
+    css_code = ""
+    js_code = ""
+
+    if "<style>" in output:
+        css_start = output.find("<style>") + len("<style>")
+        css_end = output.find("</style>")
+        css_code = output[css_start:css_end].strip()
+
+    if "<script>" in output:
+        js_start = output.find("<script>") + len("<script>")
+        js_end = output.find("</script>")
+        js_code = output[js_start:js_end].strip()
+
+    if "<body>" in output:
+        html_start = output.find("<body>") + len("<body>")
+        html_end = output.find("</body>")
+        html_code = output[html_start:html_end].strip()
+
+    return html_code, css_code, js_code
+
 def preview_app(html_code, css_code, js_code):
     """Return an interactive preview of the application."""
     html_content = f"""
@@ -52,24 +91,7 @@ def run_chatbot(user_input):
     code_output = generate_code(user_input)
 
     # Extract HTML, CSS, and JS from the generated code
-    html_code = ""
-    css_code = ""
-    js_code = ""
-
-    if "<style>" in code_output:
-        css_start = code_output.find("<style>") + len("<style>")
-        css_end = code_output.find("</style>")
-        css_code = code_output[css_start:css_end].strip()
-
-    if "<script>" in code_output:
-        js_start = code_output.find("<script>") + len("<script>")
-        js_end = code_output.find("</script>")
-        js_code = code_output[js_start:js_end].strip()
-
-    if "<body>" in code_output:
-        html_start = code_output.find("<body>") + len("<body>")
-        html_end = code_output.find("</body>")
-        html_code = code_output[html_start:html_end].strip()
+    html_code, css_code, js_code = extract_code(code_output)
 
     # Preview the application
     preview = preview_app(html_code, css_code, js_code)
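Two caveats in the loading block are worth flagging. First, torch_dtype=torch.float16 is kept even on the CPU path, and half-precision inference on CPU is slow or unsupported for some PyTorch ops. Second, with device_map="auto" the model is already placed on the GPU by accelerate, so the later model.to("cuda") is redundant and may be rejected for dispatched models. A minimal alternative sketch (my assumption, not part of this commit):

    # Assumption: float32 on CPU, float16 on GPU; let device_map handle placement
    dtype = torch.float16 if device == "cuda" else torch.float32
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=dtype,
        device_map="auto" if device == "cuda" else None
    )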
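Also note that tokenizer.decode(generated_ids[0], ...) decodes the prompt together with the completion, so response will start with the rendered chat template. A common trim, sketched with the same names the diff uses:

    # Assumption: drop the prompt tokens so only the new completion is decoded
    prompt_len = model_inputs["input_ids"].shape[1]
    response = tokenizer.decode(generated_ids[0][prompt_len:], skip_special_tokens=True)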
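For reference, a quick way to exercise the new extract_code helper. The sample string below is invented for illustration; the function and variable names come from the diff above.

    sample = (
        "<html><head><style>body { margin: 0; }</style></head>"
        "<body><h1>Demo</h1><script>console.log('hi');</script></body></html>"
    )
    html_code, css_code, js_code = extract_code(sample)
    # css_code  -> "body { margin: 0; }"
    # js_code   -> "console.log('hi');"
    # html_code -> "<h1>Demo</h1><script>console.log('hi');</script>"

Note that html_code keeps the <script> block: the helper slices everything between <body> and </body> without removing the script it already extracted.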
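Finally, if "Zero-GPU" here means Hugging Face ZeroGPU hardware, the GPU is attached per function call rather than for the whole process, and the documented pattern is to decorate the GPU-bound function with the spaces package's GPU decorator. A rough sketch, assuming ZeroGPU is enabled for this Space:

    import spaces  # provided on ZeroGPU Spaces

    @spaces.GPU  # requests a GPU for the duration of this call
    def generate_code(prompt):
        ...  # same body as in the diff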