DexterSptizu committed on
Commit 5fc9652 · verified · 1 Parent(s): 8211d9f

Create app.py

Files changed (1)
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import torch
+
+ # Model and tokenizer setup
+ model_id = "kingabzpro/Llama-3.1-8B-Instruct-Mental-Health-Classification"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     return_dict=True,
+     low_cpu_mem_usage=True,
+     torch_dtype=torch.float16,
+     device_map="auto",
+     trust_remote_code=True,
+ )
+
+ # Create the pipeline for text generation
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+
+ # Function to classify the text input
+ def classify_mental_health(text):
+     prompt = f"""Classify the text into Normal, Depression, Anxiety, Bipolar, and return the answer as the corresponding mental health disorder label.
+ text: {text}
+ label: """.strip()
+
+     # Generate the output using the model pipeline
+     outputs = pipe(prompt, max_new_tokens=2, do_sample=True, temperature=0.1)
+
+     # Extract the label from the output
+     label = outputs[0]["generated_text"].split("label: ")[-1].strip()
+     return label
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("## Mental Health Text Classification")
+
+     text_input = gr.Textbox(label="Enter your text:")
+     label_output = gr.Textbox(label="Predicted Mental Health Label")
+
+     btn = gr.Button("Classify")
+
+     # On button click, classify the input text
+     btn.click(classify_mental_health, inputs=text_input, outputs=label_output)
+
+ demo.launch()
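
Note on the label-extraction step (not part of the commit): classify_mental_health relies on the text-generation pipeline returning a list of dicts with a "generated_text" key that contains the prompt followed by the generated label, which is then split out after "label: ". A minimal self-contained sketch of that parsing step, using a mocked, hypothetical pipeline output rather than real model output:

import re

# Mocked pipeline output (hypothetical string, illustrating the structure only):
# a list with one dict whose "generated_text" echoes the prompt plus the new tokens.
outputs = [{"generated_text": "Classify the text into Normal, Depression, Anxiety, Bipolar...\ntext: I feel fine today.\nlabel: Normal"}]

# Same extraction logic as in classify_mental_health above.
label = outputs[0]["generated_text"].split("label: ")[-1].strip()
print(label)  # -> "Normal"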