import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the tokenizer and model manually
tokenizer = AutoTokenizer.from_pretrained("lambdaindie/lambda-1v-1B")
model = AutoModelForCausalLM.from_pretrained("lambdaindie/lambda-1v-1B")

# Force CPU usage
device = torch.device("cpu")
model.to(device)
model.eval()  # disable dropout etc. for inference

# Generation function
def responder(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():  # no gradients needed at inference time
        outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=True,
            top_p=0.95,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id,  # avoids a warning for causal models
        )
    # Note: this returns the prompt plus the generated continuation
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Gradio interface
iface = gr.Interface(
    fn=responder,
    inputs=gr.Textbox(lines=2, placeholder="Write something..."),
    outputs="text",
    title="Lambda-1v-1B",
    description="Local text-generation model created by Marius Jabami.",
)

iface.launch()
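
# Optional: a minimal sketch of token-by-token streaming, left commented out
# since launch() above blocks. It assumes transformers' TextIteratorStreamer
# and Gradio's support for generator functions (each yield re-renders the
# output box). The name responder_stream is hypothetical; to try it, define
# it before the interface and pass fn=responder_stream.
#
# from threading import Thread
# from transformers import TextIteratorStreamer
#
# def responder_stream(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt").to(device)
#     streamer = TextIteratorStreamer(
#         tokenizer, skip_prompt=True, skip_special_tokens=True
#     )
#     # Run generation in a background thread; the streamer yields text chunks
#     Thread(target=model.generate, kwargs=dict(
#         **inputs,
#         max_new_tokens=50,
#         do_sample=True,
#         top_p=0.95,
#         temperature=0.8,
#         pad_token_id=tokenizer.eos_token_id,
#         streamer=streamer,
#     )).start()
#     text = ""
#     for chunk in streamer:  # decoded text pieces as they are generated
#         text += chunk
#         yield text          # Gradio updates the output on each yield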