import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Model name
model_name = "meta-llama/Llama-3.1-8B-Instruct"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=None,        # no GPU
    torch_dtype="float32",  # float32 for CPU
)
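
# Note: this checkpoint is gated on the Hugging Face Hub; loading it requires
# accepting the Llama 3.1 license and authenticating (e.g. huggingface-cli login).
# In float32, the 8B parameters alone need roughly 32 GB of RAM.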
# Text generation function
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    # max_new_tokens caps the generated text; max_length would count the prompt too.
    # Passing the attention mask avoids a warning when pad and eos tokens coincide.
    outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"],
                             max_new_tokens=200, num_beams=5, early_stopping=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
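
# For instruct-tuned checkpoints, wrapping the prompt in the model's chat
# template usually produces better answers. A minimal sketch of that variant
# (optional; the plain-prompt function above is what the interface uses):
def generate_chat_response(prompt):
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(input_ids, max_new_tokens=200)
    # Strip the prompt tokens so only the newly generated reply is returned
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)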
# Build the Gradio interface
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="LLaMA 3.1 8B Instruct Text Generator (CPU)",
    description="Enter a prompt and LLaMA 3.1 8B Instruct will generate a response.",
)
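
# Optional: CPU beam-search generation can take minutes per request; enabling
# Gradio's request queue before launching helps avoid request timeouts:
# interface.queue()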
# Launch the app
if __name__ == "__main__":
    interface.launch()