noddysnots committed
Commit 2f23ac1 · verified · 1 Parent(s): 6ed4917

Update app.py

Files changed (1): app.py (+16 -23)
app.py CHANGED
@@ -1,27 +1,16 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
- import torch
- import requests
-
- # Load Qwen-2.5 model (Better compatibility)
- model_name = "qwen/Qwen2.5-7B-Chat"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- model = AutoModelForCausalLM.from_pretrained(
-     model_name,
-     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-     device_map="auto"
- )
+ from transformers import pipeline

- # Text-generation pipeline
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
+ # 🔹 Load Qwen2.5-14B-Instruct-1M with a pipeline
+ pipe = pipeline("text-generation", model="Qwen/Qwen2.5-14B-Instruct-1M")

- # 🎯 Extract interests from user input
+ # 🎯 Function to extract interests from user input
  def extract_interests(text):
      prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
-     response = generator(prompt, max_length=50, num_return_sequences=1)
+
+     response = pipe(prompt, max_length=50, num_return_sequences=1)
      interests = response[0]["generated_text"].replace(prompt, "").strip()
+
      return interests.split(", ")

  # 🎁 Web search for gift suggestions
@@ -43,14 +32,19 @@ def search_gifts(interests):
  def recommend_gifts(text):
      if not text:
          return "Please enter a description."
+
      interests = extract_interests(text)
      links = search_gifts(interests)
-     return {"Predicted Interests": interests, "Gift Suggestions": links}

- # 🎨 Gradio UI
+     return {
+         "Predicted Interests": interests,
+         "Gift Suggestions": links
+     }
+
+ # 🎨 Gradio UI for easy interaction
  demo = gr.Interface(
-     fn=recommend_gifts,
-     inputs="text",
+     fn=recommend_gifts,
+     inputs="text",
      outputs="json",
      title="🎁 AI Gift Recommender",
      description="Enter details about the person you are buying a gift for, and get personalized suggestions with shopping links!",
@@ -59,4 +53,3 @@ demo = gr.Interface(
  # 🚀 Launch Gradio App
  if __name__ == "__main__":
      demo.launch()
-
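
For reference, here is a minimal standalone sketch of the pipeline-based extraction flow this commit switches to. It is not part of app.py: the small Qwen/Qwen2.5-0.5B-Instruct checkpoint and max_new_tokens=50 are illustrative assumptions chosen so the snippet runs on modest hardware, whereas the commit itself loads Qwen/Qwen2.5-14B-Instruct-1M and passes max_length=50.

# Standalone sketch (illustrative, not from the commit): same extraction flow as
# the updated app.py, with a smaller instruct checkpoint assumed for local testing.
from transformers import pipeline

pipe = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct")  # assumption: small stand-in model

def extract_interests(text):
    prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
    # The text-generation pipeline echoes the prompt inside "generated_text",
    # so the prompt is stripped before splitting the output into a list of interests.
    response = pipe(prompt, max_new_tokens=50, num_return_sequences=1)
    interests = response[0]["generated_text"].replace(prompt, "").strip()
    return interests.split(", ")

if __name__ == "__main__":
    print(extract_interests("Gift for my sister who loves hiking and photography"))

Replacing the explicit AutoModelForCausalLM/AutoTokenizer setup with a single pipeline() call is what accounts for this commit's +16 -23 reduction, at the cost of the explicit torch_dtype and device_map control the previous loading code had.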