Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -9,7 +9,7 @@ try:
|
|
9 |
except ImportError:
|
10 |
raise RuntimeError("Missing required dependency: flash_attn. Install with `pip install flash-attn --no-build-isolation`")
|
11 |
|
12 |
-
# Load DeepSeek-R1 model
|
13 |
model_name = "deepseek-ai/DeepSeek-R1"
|
14 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
15 |
|
@@ -26,7 +26,7 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
26 |
# Use a text-generation pipeline
|
27 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
|
28 |
|
29 |
-
# 🎯
|
30 |
def extract_interests(text):
|
31 |
prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
|
32 |
|
@@ -76,3 +76,5 @@ demo = gr.Interface(
|
|
76 |
# 🚀 Launch Gradio App
|
77 |
if __name__ == "__main__":
|
78 |
demo.launch()
|
|
|
|
|
|
9 |
except ImportError:
|
10 |
raise RuntimeError("Missing required dependency: flash_attn. Install with `pip install flash-attn --no-build-isolation`")
|
11 |
|
12 |
+
# Load DeepSeek-R1 model
|
13 |
model_name = "deepseek-ai/DeepSeek-R1"
|
14 |
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
15 |
|
|
|
26 |
# Use a text-generation pipeline
|
27 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
|
28 |
|
29 |
+
# 🎯 Extract interests from user input
|
30 |
def extract_interests(text):
|
31 |
prompt = f"Extract 3-5 relevant interests from this request: '{text}'. Focus on hobbies and product preferences."
|
32 |
|
|
|
76 |
# 🚀 Launch Gradio App
|
77 |
if __name__ == "__main__":
|
78 |
demo.launch()
|
79 |
+
|
80 |
+
|