# AI Gift Recommender — Hugging Face Space app (Gradio UI + DeepSeek-R1).
# Standard library
from urllib.parse import quote_plus

# Third-party
import gradio as gr
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load the DeepSeek-R1 tokenizer and model. trust_remote_code=True is required
# because the repository ships custom modeling code.
model_name = "deepseek-ai/DeepSeek-R1"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Load the model weights. float16 is forced explicitly (per the comment below,
# to avoid an fp8 dtype issue); device_map="auto" lets accelerate place layers
# on the available GPU(s)/CPU.
try:
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # Forces float16 to prevent fp8 issue
        device_map="auto",
        trust_remote_code=True
    )
except ImportError as e:
    # The custom modeling code imports flash_attn at load time; surface a
    # clear install hint instead of a raw ImportError.
    raise RuntimeError("Missing required dependency: flash_attn. Install with `pip install flash_attn`") from e

# Shared text-generation pipeline used by extract_interests().
# device=0 selects the first CUDA GPU when available; -1 runs on CPU.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
# Extract interest keywords from the user's free-text request
def extract_interests(text):
    """Ask the LLM for 3-5 interest keywords describing *text*.

    Parameters
    ----------
    text : str
        Free-text description of the gift recipient.

    Returns
    -------
    list[str]
        Cleaned interest keywords parsed from the model's comma-separated
        output (empty fragments dropped).
    """
    prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
    # Use max_new_tokens (not max_length): max_length counts the prompt tokens
    # too, so a prompt longer than 50 tokens would leave no room for — or even
    # error on — the completion.
    response = generator(prompt, max_new_tokens=50, num_return_sequences=1)
    # The pipeline echoes the prompt in generated_text; drop it so only the
    # model's completion remains.
    raw = response[0]["generated_text"].replace(prompt, "").strip()
    # Split on commas and strip each fragment so stray spacing or trailing
    # punctuation doesn't produce blank keywords.
    return [kw.strip() for kw in raw.split(",") if kw.strip()]
# Build shopping-site search links for the extracted interests
def search_gifts(interests):
    """Return store-name -> search-URL links for a list of interest keywords.

    Parameters
    ----------
    interests : list[str]
        Interest keywords, e.g. ["books", "board games"].

    Returns
    -------
    dict[str, str]
        Mapping of store name to a search URL covering all interests.
    """
    # quote_plus percent-encodes special characters and turns spaces into '+';
    # the previous plain "+".join left multi-word interests unescaped, which
    # produced malformed query strings.
    query = quote_plus(" ".join(interests))
    return {
        "Amazon": f"https://www.amazon.in/s?k={query}",
        "Flipkart": f"https://www.flipkart.com/search?q={query}",
        "IGP": f"https://www.igp.com/search?q={query}",
        "IndiaMart": f"https://dir.indiamart.com/search.mp?ss={query}",
    }
# Main entry point: description text in, interests + shopping links out
def recommend_gifts(text):
    """Recommend gifts for the person described in *text*.

    Parameters
    ----------
    text : str | None
        Free-text description of the gift recipient.

    Returns
    -------
    dict | str
        A dict with "Predicted Interests" (list of keywords) and
        "Gift Suggestions" (store -> URL mapping), or a plain error
        message string when the input is missing/blank.
    """
    # Reject None, empty, and whitespace-only input; the old `if not text`
    # check let strings like "   " through to the model.
    if not text or not text.strip():
        return "Please enter a description."
    interests = extract_interests(text)  # Extract interests using DeepSeek R1
    links = search_gifts(interests)      # Get shopping links
    return {
        "Predicted Interests": interests,
        "Gift Suggestions": links,
    }
# Gradio UI: single text input, JSON output rendering the recommendation dict.
demo = gr.Interface(
    fn=recommend_gifts,
    inputs="text",
    outputs="json",
    # The original title string was mojibake (a mis-encoded emoji prefix);
    # use clean ASCII so the header renders correctly everywhere.
    title="AI Gift Recommender",
    description="Enter details about the person you are buying a gift for, and get personalized suggestions with shopping links!",
)

# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()