import os

import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline

model_id = "google/gemma-3-12b-it"
hf_token = os.environ.get("HUGGINGFACE_TOKEN")


# Model loading + inference. The @spaces.GPU decorator requests a ZeroGPU
# device for the duration of the call. Note: loading the model inside the
# function means it is re-initialized on every request, which is slow for
# a 12B checkpoint.
@spaces.GPU
def generate(prompt):
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, token=hf_token, device_map="auto"
    )
    pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
    output = pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]


# Build the Gradio interface.
gr.Interface(
    fn=generate,
    inputs=gr.Text(label="Enter your prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Gemma-3-12B Inference (ZeroGPU)",
).launch()
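
# --- Optional alternative: load once at startup (a minimal sketch, not wired
# into the app above). ZeroGPU Spaces patch torch when `spaces` is imported,
# so a model can be moved to CUDA at module level before a GPU is actually
# attached; this avoids re-loading the checkpoint on every request. The
# identifiers reuse the ones defined above; everything else is an assumption
# about your Space setup, not the app's confirmed behavior.
#
# tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
# model = AutoModelForCausalLM.from_pretrained(
#     model_id, token=hf_token, torch_dtype="auto"
# ).to("cuda")
# pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
#
# @spaces.GPU
# def generate(prompt):
#     # Only inference runs per request; the weights stay resident.
#     output = pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
#     return output[0]["generated_text"]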