import asyncio
import re
from typing import Dict, List
from urllib.parse import quote, quote_plus

import aiohttp
import torch
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util


class DynamicRecommender:
    def __init__(self):
        self.headers = {
            'User-Agent': (
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/100.0.4896.75 Safari/537.36'
            )
        }
        # Load the sentence-embedding model (downloaded on first run).
        self.model = SentenceTransformer('all-mpnet-base-v2')

        # Pre-define candidate categories to search for. Adjust these to
        # suit your domain: the more you add, the broader your coverage.
        # They can be as broad or as niche as you like.
        self.candidate_categories = [
            "tech gadgets",
            "programming books",
            "self help books",
            "business books",
            "leadership novels",
            "fashion accessories",
            "beauty products",
            "board games",
            "musical instruments",
            "cooking utensils",
            "cookbooks",
            "art and painting supplies",
            "home decor",
            "pet supplies",
            "novels",
            "gaming consoles",
            "smartphones",
            "camera gear",
            "toys",
            "gift hamper"
        ]

        # Pre-encode the categories for faster scoring at request time.
        self.category_embeddings = self.model.encode(
            self.candidate_categories, convert_to_tensor=True
        )
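        # With the defaults above, category_embeddings has shape
        # [num_categories, 768]: all-mpnet-base-v2 produces 768-dimensional
        # embeddings, computed once here so each request only pays for
        # encoding the user's text.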

    # ------------------------------------------------------------------
    # Amazon search
    # ------------------------------------------------------------------
    async def search_amazon(self, query: str) -> List[Dict]:
        print(f"Searching Amazon for: {query}")
        # URL-encode the query so spaces and punctuation are handled safely.
        search_url = f"https://www.amazon.in/s?k={quote_plus(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_amazon_results(html)
        return []

    def _parse_amazon_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # These selectors may need updating if Amazon changes its HTML.
        search_items = soup.select('.s-result-item')
        for item in search_items:
            try:
                name_elem = item.select_one('.a-text-normal')
                price_elem = item.select_one('.a-price-whole')
                link_elem = item.select_one('a.a-link-normal')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href')
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'Amazon',
                        'url': 'https://www.amazon.in' + product_url,
                        'description': f"This item is from Amazon related to '{product_name}'."
                    })
            except Exception:
                # Skip malformed result cards rather than failing the search.
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # Flipkart search
    # ------------------------------------------------------------------
    async def search_flipkart(self, query: str) -> List[Dict]:
        print(f"Searching Flipkart for: {query}")
        search_url = f"https://www.flipkart.com/search?q={quote_plus(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_flipkart_results(html)
        return []

    def _parse_flipkart_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # These selectors may need updating if Flipkart changes its HTML.
        item_cards = soup.select('._1AtVbE')
        for item in item_cards:
            try:
                name_elem = item.select_one('._4rR01T')
                price_elem = item.select_one('._30jeq3')
                link_elem = item.select_one('a')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href')
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'Flipkart',
                        'url': 'https://www.flipkart.com' + product_url,
                        'description': f"This item is from Flipkart related to '{product_name}'."
                    })
            except Exception:
                # Skip malformed result cards rather than failing the search.
                continue
        return products[:5]

    # ------------------------------------------------------------------
    # IGP search
    # ------------------------------------------------------------------
    async def search_igp(self, query: str) -> List[Dict]:
        print(f"Searching IGP for: {query}")
        # The query is a path segment here, so use quote (spaces -> %20)
        # rather than quote_plus.
        search_url = f"https://www.igp.com/search/{quote(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(search_url, headers=self.headers) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._parse_igp_results(html)
        return []

    def _parse_igp_results(self, html: str) -> List[Dict]:
        soup = BeautifulSoup(html, 'html.parser')
        products = []
        # These selectors will likely need updating against IGP's actual HTML.
        item_cards = soup.select('.product-item')
        for item in item_cards:
            try:
                name_elem = item.select_one('.product-title')
                price_elem = item.select_one('.product-price')
                link_elem = item.select_one('a')
                if name_elem and price_elem and link_elem:
                    product_name = name_elem.get_text(strip=True)
                    product_price = price_elem.get_text(strip=True)
                    product_url = link_elem.get('href')
                    products.append({
                        'name': product_name,
                        'price': product_price,
                        'source': 'IGP',
                        'url': 'https://www.igp.com' + product_url,
                        'description': f"This item is from IGP related to '{product_name}'."
                    })
            except Exception:
                # Skip malformed result cards rather than failing the search.
                continue
        return products[:5]
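
    # ------------------------------------------------------------------
    # Optional consolidation (sketch): the three search_* methods above
    # share the same fetch pattern. A helper like this hypothetical
    # _fetch_html could absorb it; shown as a sketch, not wired in.
    # ------------------------------------------------------------------
    async def _fetch_html(self, url: str) -> str:
        """Fetch a URL and return its HTML, or an empty string on failure."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self.headers) as response:
                if response.status == 200:
                    return await response.text()
        return ""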

    # ------------------------------------------------------------------
    # Extract categories from user text using embeddings
    # ------------------------------------------------------------------
    def _extract_keywords(self, text: str) -> List[str]:
        """
        1. Parse out an age if one is present.
        2. Use embeddings to find the top 2-3 matching categories
           from self.candidate_categories.
        3. Combine them with the age, if found.
        """
        # 1) Check for an age with a regex (e.g. "age 25")
        age_match = re.search(r'age\s+(\d+)', text.lower())
        age = age_match.group(1) if age_match else None

        # 2) Embed the entire user text
        user_emb = self.model.encode(text, convert_to_tensor=True)

        # Compute cosine similarity with each candidate category
        sims = util.cos_sim(user_emb, self.category_embeddings)[0]  # shape: [num_categories]

        # 3) Grab the top-k indices
        top_k = min(3, len(self.candidate_categories))
        top_results = torch.topk(sims, k=top_k)

        best_categories = []
        for idx in top_results.indices:
            cat_text = self.candidate_categories[int(idx)]
            if age:
                cat_text = f"{cat_text} for {age} year old"
            best_categories.append(cat_text)

        print("Embedding-based categories:", best_categories)
        return best_categories
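
    # Illustrative example (actual matches depend on the embedding model):
    # "gift for my brother, age 25, who loves coding" parses age="25" and
    # might yield ["programming books for 25 year old",
    # "tech gadgets for 25 year old", "gaming consoles for 25 year old"].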

    # ------------------------------------------------------------------
    # Main recommendations
    # ------------------------------------------------------------------
    async def get_recommendations(self, text: str) -> List[Dict]:
        """
        Search Amazon, Flipkart, and IGP for the top category matches
        and return a de-duplicated shortlist.
        """
        try:
            # 1) Derive the best categories (search queries) from the user text
            queries = self._extract_keywords(text)

            # 2) For each query, hit Amazon, Flipkart, and IGP concurrently
            all_products = []
            for query in queries:
                site_results = await asyncio.gather(
                    self.search_amazon(query),
                    self.search_flipkart(query),
                    self.search_igp(query),
                )
                for products in site_results:
                    all_products.extend(products)

            # 3) De-duplicate by product name
            seen = set()
            unique_products = []
            for product in all_products:
                if product['name'] not in seen:
                    seen.add(product['name'])
                    unique_products.append(product)

            # 4) Optionally slice or sort further
            return unique_products[:5]
        except Exception as e:
            print(f"Error in recommendations: {e}")
            return []
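

# ----------------------------------------------------------------------
# Minimal usage sketch. Assumes network access; the scraped sites may
# block automated requests or change their markup, so results can be
# empty. The example query below is made up.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    async def _demo():
        recommender = DynamicRecommender()
        products = await recommender.get_recommendations(
            "Looking for a gift for my sister, age 21, who loves painting"
        )
        for product in products:
            print(f"[{product['source']}] {product['name']} - {product['price']}")
            print(f"  {product['url']}")

    asyncio.run(_demo())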