noddysnots committed on
Commit
6ed4917
Β·
verified Β·
1 Parent(s): d8fa37b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -22
app.py CHANGED
@@ -3,32 +3,26 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
  import requests
5
 
6
- # Load DeepSeek-R1 model
7
- model_name = "deepseek-ai/DeepSeek-R1"
8
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
9
-
10
- # Ensure compatibility and force execution on GPU if available
11
  device = "cuda" if torch.cuda.is_available() else "cpu"
12
 
13
  model = AutoModelForCausalLM.from_pretrained(
14
  model_name,
15
- torch_dtype=torch.float16 if device == "cuda" else torch.float32, # Prevents fp8 errors
16
- device_map="auto",
17
- trust_remote_code=True
18
  )
19
 
20
- # Use a text-generation pipeline
21
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
22
 
23
  # 🎯 Extract interests from user input
24
  def extract_interests(text):
25
  prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
26
-
27
- # Generate model output
28
  response = generator(prompt, max_length=50, num_return_sequences=1)
29
  interests = response[0]["generated_text"].replace(prompt, "").strip()
30
-
31
- return interests.split(", ") # Convert to a list of keywords
32
 
33
  # 🎁 Web search for gift suggestions
34
  def search_gifts(interests):
@@ -49,16 +43,11 @@ def search_gifts(interests):
49
  def recommend_gifts(text):
50
  if not text:
51
  return "Please enter a description."
 
 
 
52
 
53
- interests = extract_interests(text) # Extract interests using DeepSeek R1
54
- links = search_gifts(interests) # Get shopping links
55
-
56
- return {
57
- "Predicted Interests": interests,
58
- "Gift Suggestions": links
59
- }
60
-
61
- # 🎨 Gradio UI for easy interaction
62
  demo = gr.Interface(
63
  fn=recommend_gifts,
64
  inputs="text",
 
3
  import torch
4
  import requests
5
 
6
# Load the Qwen-2.5 chat model.
# FIX(review): the hub id "qwen/Qwen2.5-7B-Chat" does not exist — the
# Qwen2.5 series publishes "-Instruct" checkpoints (the "-Chat" suffix
# belongs to the older Qwen1.5 line), so from_pretrained would 404.
model_name = "Qwen/Qwen2.5-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
9
# Probe for a CUDA GPU once and reuse the answer below.
_has_cuda = torch.cuda.is_available()
device = "cuda" if _has_cuda else "cpu"

# Half precision keeps GPU memory in check; CPU inference needs fp32.
# device_map="auto" lets accelerate place the layers on available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if _has_cuda else torch.float32,
    device_map="auto",
)
16
 
17
# Text-generation pipeline.
# FIX(review): the model was loaded with device_map="auto" (accelerate),
# so the pipeline must NOT also be given a `device` argument — recent
# transformers versions raise "The model has been loaded with accelerate
# and therefore cannot be moved to a specific device" when both are set.
# The pipeline picks up the placement from the model itself.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
19
 
20
# 🎯 Extract interests from user input
def extract_interests(text):
    """Ask the LLM for 3-5 interest keywords describing *text*.

    Returns a list of keyword strings — the model's comma-separated
    answer split on ", " (a single-element list if the model did not
    use that separator).
    """
    prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
    # FIX(review): max_length budgets prompt + completion together; this
    # prompt alone is ~40 tokens, so max_length=50 left almost no room to
    # generate (and errors once the prompt exceeds 50 tokens).
    # max_new_tokens budgets only the generated continuation.
    response = generator(prompt, max_new_tokens=50, num_return_sequences=1)
    # The pipeline echoes the prompt in generated_text; strip it off.
    interests = response[0]["generated_text"].replace(prompt, "").strip()
    return interests.split(", ")
 
26
 
27
  # 🎁 Web search for gift suggestions
28
  def search_gifts(interests):
 
43
def recommend_gifts(text):
    """End-to-end handler: free-text request -> interests -> gift links.

    Returns a dict with the predicted interests and shopping links, or a
    plain prompt string when the input is empty.
    """
    if not text:
        return "Please enter a description."
    detected = extract_interests(text)
    return {
        "Predicted Interests": detected,
        "Gift Suggestions": search_gifts(detected),
    }
49
 
50
+ # 🎨 Gradio UI
 
 
 
 
 
 
 
 
51
  demo = gr.Interface(
52
  fn=recommend_gifts,
53
  inputs="text",