import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load DeepSeek-R1 model and tokenizer
model_name = "deepseek-ai/DeepSeek-R1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto"
)
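# Note: the full DeepSeek-R1 checkpoint is a very large mixture-of-experts model;
# even in float16 with device_map="auto" it will not fit on typical single-GPU
# hardware. For local testing, a distilled variant such as
# "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B" is a practical stand-in.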

# Function to generate gift recommendations
def recommend_gifts(text):
    if not text:
        return {"Recommendation": "Please enter a description."}

    # Prepare input prompt for the model
    prompt = f"Based on the following description, suggest suitable gifts: '{text}'"

    # Tokenize on the model's device ("cuda" is not guaranteed with device_map="auto")
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Pass the attention mask along and cap new tokens rather than total length
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True)
    # Decode only the newly generated tokens so the prompt is not echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    recommendation = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return {"Recommendation": recommendation}

# Gradio interface
demo = gr.Interface(
    fn=recommend_gifts,
    inputs="text",
    outputs="json",
    title="AI Gift Recommender",
    description="Enter details about the person you are buying a gift for, and get personalized suggestions!",
)
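# Generation can take a while on a model this size; calling demo.queue() before
# launch() is a common way to avoid request timeouts on hosted Spaces.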

if __name__ == "__main__":
    demo.launch()