File size: 1,196 Bytes
aeeed0b
c435330
 
aeeed0b
4538963
 
c435330
 
47b24d6
4538963
aeeed0b
 
 
 
4538963
 
 
 
 
 
 
aeeed0b
4538963
aeeed0b
4538963
aeeed0b
4538963
 
47b24d6
4538963
 
aeeed0b
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load DeepSeek-R1 model and tokenizer once at import time so every
# request served by the Gradio app reuses the same in-memory weights.
model_name = "deepseek-ai/DeepSeek-R1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# float16 halves the weight memory footprint; device_map="auto" lets the
# accelerate backend place (and if needed shard) the model across the
# available GPUs, falling back to CPU when none are present.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

# Function to generate gift recommendations
def recommend_gifts(text: str):
    """Generate gift suggestions for a free-text recipient description.

    Args:
        text: Description of the gift recipient (interests, age, occasion...).
              Falsy input (empty string / None) is rejected.

    Returns:
        A dict with a single "Recommendation" key holding the generated
        text, or a plain error string when the input is empty.
    """
    if not text:
        return "Please enter a description."

    # Prepare input prompt for the model
    prompt = f"Based on the following description, suggest suitable gifts: '{text}'"

    # Tokenize, then move tensors to wherever device_map="auto" actually
    # placed the model — hard-coding "cuda" breaks on CPU-only hosts.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Pass the attention mask explicitly and bound only the *new* tokens:
    # max_length counts the prompt too, so a long description could leave
    # no room for a reply. Inference needs no autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=200,
            do_sample=True,
        )
    recommendation = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return {"Recommendation": recommendation}

# Gradio interface: a single free-text input box, JSON output panel.
# "text" / "json" are Gradio shorthand component names.
demo = gr.Interface(
    fn=recommend_gifts,
    inputs="text",
    outputs="json",
    title="AI Gift Recommender",
    description="Enter details about the person you are buying a gift for, and get personalized suggestions!",
)

# Start the local web server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()