Update app.py
Browse files
app.py
CHANGED
@@ -11,11 +11,14 @@ tokenizer.pad_token_id = tokenizer.eos_token_id
|
|
11 |
model = AutoModelForSequenceClassification.from_pretrained(MODEL_URL, low_cpu_mem_usage=True, return_dict=True,torch_dtype=torch.float16,
|
12 |
device_map="cpu")
|
13 |
|
14 |
-
def prediction(
|
15 |
# create pipeline
|
16 |
-
|
17 |
device_map="cpu",)
|
18 |
|
|
|
|
|
|
|
19 |
outputs = pipe(prompt, max_new_tokens=2, do_sample=True, temperature=0.1)
|
20 |
preds = outputs[0]["generated_text"].split("label: ")[-1].strip()
|
21 |
|
|
|
# Load the classification checkpoint once at module import time.
# fp16 weights + low_cpu_mem_usage keep the CPU memory footprint down.
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_URL,
    low_cpu_mem_usage=True,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map="cpu",
)
def prediction(text):
    """Classify *text* into one of: Normal, Depression, Anxiety, Bipolar.

    Wraps the module-level ``model``/``tokenizer`` in a text-generation
    pipeline, prompts it with an instruction ending in ``label:``, and
    parses the generated continuation as the predicted label.

    Args:
        text: The input text to classify.

    Returns:
        The label string parsed from the model's generation.

    NOTE(review): the model is loaded via AutoModelForSequenceClassification
    but driven through a "text-generation" pipeline — a causal LM head
    (AutoModelForCausalLM) is the usual pairing for this task setup; confirm
    the checkpoint actually supports generation.
    """
    # create pipeline (re-created per call; consider hoisting to module level
    # if this is hot — it reuses the already-loaded model/tokenizer)
    pipe = pipeline(
        "text-generation",
        tokenizer=tokenizer,
        model=model,
        torch_dtype=torch.float16,
        device_map="cpu",
    )
    prompt = f"""Classify the text into Normal, Depression, Anxiety, Bipolar, and return the answer as the corresponding mental health disorder label.
text: {text}
label: """.strip()
    # max_new_tokens=2 limits output to the label token(s); temperature=0.1
    # makes the sampled label near-deterministic.
    outputs = pipe(prompt, max_new_tokens=2, do_sample=True, temperature=0.1)
    preds = outputs[0]["generated_text"].split("label: ")[-1].strip()
    # Fix: the original computed `preds` and silently discarded it, so the
    # function returned None to every caller.
    return preds