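# app.py — Gradio demo: a chatbot that generates a small web app (HTML/CSS/JS) from a
# natural-language description using Qwen2.5-Coder and renders a live preview.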
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Check whether a GPU is available (Zero-GPU)
if torch.cuda.is_available():
    device = "cuda"  # Use the Zero GPU
    print("Zero-GPU detected. Using GPU to accelerate inference.")
else:
    device = "cpu"  # Fall back to CPU when no GPU is present
    print("No GPU detected. Using CPU.")
# Load a smaller model for code generation
model_name = "Qwen/Qwen2.5-Coder-1.5B-Instruct"
print("Loading model...")
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    # float16 saves memory on GPU; fall back to float32 on CPU, where half precision is slow or unsupported
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    device_map="auto" if device == "cuda" else None  # Let accelerate place the model on the GPU automatically
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Note: with device_map="auto" the model is already dispatched to the GPU; calling
# model.to("cuda") on a dispatched model raises an error, so no explicit move is needed.
print("Model loaded successfully.")
def generate_code(prompt):
    """Generates code from the user's prompt."""
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    # add_generation_prompt=True appends the assistant header itself, so no empty assistant turn is needed
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    model_inputs = tokenizer([text], return_tensors="pt").to(device)  # Move inputs to the same device as the model
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=128,  # Keep generations short for faster responses and lower memory use
        do_sample=True,
        temperature=0.7
    )
    # Decode only the newly generated tokens, not the echoed prompt
    new_tokens = generated_ids[0][model_inputs.input_ids.shape[1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return response
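# Illustrative usage (not executed here): generate_code("Make a red button that shows
# an alert when clicked") should return an HTML document containing <style>, <body>,
# and <script> sections, which extract_code() below pulls apart. Note that 128 new
# tokens may truncate larger pages; raise max_new_tokens if outputs get cut off.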
def extract_code(output):
    """Extracts the HTML, CSS, and JavaScript from the generated text."""
    html_code = ""
    css_code = ""
    js_code = ""
    # Require both opening and closing tags so a missing close tag cannot
    # silently slice to index -1 and chop the last character
    if "<style>" in output and "</style>" in output:
        css_start = output.find("<style>") + len("<style>")
        css_end = output.find("</style>")
        css_code = output[css_start:css_end].strip()
    if "<script>" in output and "</script>" in output:
        js_start = output.find("<script>") + len("<script>")
        js_end = output.find("</script>")
        js_code = output[js_start:js_end].strip()
    if "<body>" in output and "</body>" in output:
        html_start = output.find("<body>") + len("<body>")
        html_end = output.find("</body>")
        html_code = output[html_start:html_end].strip()
    return html_code, css_code, js_code
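# Note: this is a deliberately naive parser. It assumes at most one <style>, <script>,
# and <body> block per response; if the model emits several, only the first of each is
# kept, and any markup outside <body> is dropped.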
def preview_app(html_code, css_code, js_code):
    """Returns an interactive preview of the application as a single HTML document."""
html_content = f"""
<html>
<head>
<style>
{css_code}
</style>
</head>
<body>
{html_code}
<script>
{js_code}
</script>
</body>
</html>
"""
return html_content
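# Caveat: gr.HTML injects this markup into the Gradio page, and depending on the Gradio
# version and browser sandboxing, inline <script> tags may be stripped or not executed,
# so the JavaScript part of the preview is best-effort.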
def run_chatbot(user_input):
    """Processes the user's input and produces code plus a live preview."""
    code_output = generate_code(user_input)
    # Split the generated output into HTML, CSS, and JS
    html_code, css_code, js_code = extract_code(code_output)
    # Build the preview document
    preview = preview_app(html_code, css_code, js_code)
    # gr.Code components render raw source with syntax highlighting, so return the
    # plain code strings rather than Markdown-fenced blocks
    return html_code, css_code, js_code, preview
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# App-Builder Chatbot")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Describe the application (e.g., 'Make a red button')", lines=3)
            generate_button = gr.Button("Generate Code")
        with gr.Column():
            html_output = gr.Code(label="HTML Code", language="html")
            css_output = gr.Code(label="CSS Code", language="css")
            js_output = gr.Code(label="JavaScript Code", language="javascript")
            preview_output = gr.HTML(label="Preview")
generate_button.click(
run_chatbot,
inputs=[user_input],
outputs=[html_output, css_output, js_output, preview_output]
)
# Launch the application
if __name__ == "__main__":
demo.launch()
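    # demo.launch(share=True) would additionally expose a temporary public link; on
    # Hugging Face Spaces the default launch() is sufficient.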