import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define the prediction function
def predict(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Create the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(lines=5, label="Input Text")],
    outputs=[gr.Textbox(label="Generated Text")],
    title="DeepSeek-R1-Distill-Qwen-1.5B Text Generation",
    description="Enter text and the model will generate a continuation.",
)

if __name__ == "__main__":
    iface.launch()
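
# Optional tweak (a sketch, not part of the original script): calling
# model.generate() without arguments relies on the default generation
# settings, which can cut the continuation short. The variant below, which
# assumes the `tokenizer` and `model` objects loaded above, passes an
# explicit token budget and sampling parameters. The values used here
# (max_new_tokens=256, temperature=0.6, top_p=0.95) are illustrative
# choices, not settings taken from the source; if adopted, this function
# would replace the `predict` function defined above as the Gradio `fn`.
def predict_with_sampling(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,  # cap on newly generated tokens (illustrative)
        do_sample=True,      # sample instead of greedy decoding
        temperature=0.6,     # illustrative sampling temperature
        top_p=0.95,          # illustrative nucleus-sampling threshold
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)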