File size: 2,643 Bytes
aeeed0b
48bf064
c435330
91e1ca4
aeeed0b
1e69485
 
 
 
 
 
4df7255
df9ff98
91e1ca4
48bf064
1e69485
 
 
 
 
 
 
 
 
 
 
4df7255
48bf064
 
df9ff98
3f2c2f8
48bf064
3f2c2f8
48bf064
 
 
 
 
47b24d6
3f2c2f8
df9ff98
3f2c2f8
 
df9ff98
48bf064
df9ff98
 
 
91e1ca4
48bf064
 
 
 
91e1ca4
 
48bf064
df9ff98
aeeed0b
 
 
 
48bf064
df9ff98
91e1ca4
 
3f2c2f8
 
91e1ca4
aeeed0b
48bf064
df9ff98
aeeed0b
48bf064
 
47b24d6
91e1ca4
 
aeeed0b
 
91e1ca4
aeeed0b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from urllib.parse import quote_plus

import gradio as gr
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Fail fast if flash_attn is not importable (flash-attn builds against an
# already-installed torch, which the top-of-file import guarantees here).
try:
    import flash_attn  # noqa: F401 -- imported only to verify availability
except ImportError as exc:
    # Chain the original ImportError so the real failure (e.g. an ABI
    # mismatch vs. a genuinely missing package) stays visible in tracebacks.
    raise RuntimeError("Missing required dependency: flash_attn. Install with `pip install flash-attn`") from exc

# DeepSeek-R1 ships custom modeling code, so remote code must be trusted
# for both the tokenizer and the model.
model_name = "deepseek-ai/DeepSeek-R1"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Run on the GPU when one is available, otherwise fall back to CPU.
use_gpu = torch.cuda.is_available()
device = "cuda" if use_gpu else "cpu"

# fp16 on GPU sidesteps fp8-related load errors; CPU stays at fp32.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if use_gpu else torch.float32,
    device_map="auto",
    trust_remote_code=True,
)

# Bundle model + tokenizer into a text-generation pipeline
# (device 0 = first CUDA device, -1 = CPU for the pipeline API).
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if use_gpu else -1,
)


# 🎯 Function to extract interests from user input
def extract_interests(text):
    """Use the LLM to pull 3-5 interest keywords from a free-text request.

    Parameters
    ----------
    text : str
        The user's description of the gift recipient.

    Returns
    -------
    list[str]
        Interest keywords. The model is prompted to emit a comma-separated
        list; entries are whitespace-trimmed and empty entries are dropped.
    """
    prompt = (
        f"Extract 3-5 relevant interests from this request: '{text}'. "
        "Focus on hobbies and product preferences."
    )

    # max_new_tokens bounds only the completion; the original max_length=50
    # also counted the prompt tokens, which can truncate or abort generation
    # whenever the prompt alone approaches 50 tokens.
    response = generator(prompt, max_new_tokens=50, num_return_sequences=1)
    completion = response[0]["generated_text"].replace(prompt, "").strip()

    # Split on bare commas and strip each piece: splitting on ", " exactly
    # misses ","-without-space output and yields [""] for empty completions.
    return [part.strip() for part in completion.split(",") if part.strip()]


# 🎁 Web search for gift suggestions
def search_gifts(interests):
    """Build search-result URLs on Indian shopping sites for the interests.

    Parameters
    ----------
    interests : list[str]
        Interest keywords, e.g. ``["books", "tea sets"]``.

    Returns
    -------
    dict[str, str]
        Mapping of site name to a search-results URL for all interests.
    """
    # Percent-encode each term (quote_plus maps spaces to '+' and escapes
    # reserved characters) -- the raw "+".join of unencoded model output
    # produced broken URLs for terms containing spaces or specials.
    query = "+".join(quote_plus(term) for term in interests)

    return {
        "Amazon": f"https://www.amazon.in/s?k={query}",
        "Flipkart": f"https://www.flipkart.com/search?q={query}",
        "IGP": f"https://www.igp.com/search?q={query}",
        "IndiaMart": f"https://dir.indiamart.com/search.mp?ss={query}",
    }


# 🎯 Main function for gift recommendation
def recommend_gifts(text):
    """End-to-end recommendation: extract interests, then build shop links.

    Parameters
    ----------
    text : str
        Free-text description of the gift recipient.

    Returns
    -------
    dict | str
        A dict with the predicted interests and per-site search URLs, or a
        plain error string when the input is missing or blank (the Gradio
        JSON output component renders either form).
    """
    # Treat whitespace-only input the same as empty input; previously
    # "   " slipped past the guard and was sent to the model.
    if not text or not text.strip():
        return "Please enter a description."

    interests = extract_interests(text)  # LLM-derived interest keywords
    links = search_gifts(interests)      # shopping links per site

    return {
        "Predicted Interests": interests,
        "Gift Suggestions": links,
    }


# 🎨 Gradio UI for easy interaction.
# Single text box in, JSON out: recommend_gifts returns either an error
# string or a dict of predicted interests plus shopping links, and the
# "json" output component renders both forms.
demo = gr.Interface(
    fn=recommend_gifts,
    inputs="text",
    outputs="json",
    title="🎁 AI Gift Recommender",
    description="Enter details about the person you are buying a gift for, and get personalized suggestions with shopping links!",
)

# 🚀 Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()