Spaces:
Runtime error
Upload folder using huggingface_hub
- README.md +9 -8
- app.py +63 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title: MedicalChatbot
-
-colorFrom: green
-colorTo: blue
+title: MedicalChatbot-Phi3.5-mini
+app_file: app.py
 sdk: gradio
 sdk_version: 5.15.0
-app_file: app.py
-pinned: false
 ---
-
-
+title: RomanEng2Nep-v2
+emoji: ⚕️
+colorFrom: purple
+colorTo: red
+app_file: app.py
+sdk: gradio
+sdk_version: 5.15.0
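Note that the resulting README carries two metadata blocks: Spaces only reads the YAML frontmatter between the first pair of --- markers, so the keys added after the closing --- (the second title, the emoji, colors, and the repeated sdk entries) end up as plain README body text. A consolidated frontmatter merging the values introduced by this commit would look roughly like the sketch below (keeping MedicalChatbot-Phi3.5-mini as the title is an assumption; the diff also adds a conflicting RomanEng2Nep-v2 title):

---
title: MedicalChatbot-Phi3.5-mini
emoji: ⚕️
colorFrom: purple
colorTo: red
sdk: gradio
sdk_version: 5.15.0
app_file: app.py
---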
app.py
ADDED
@@ -0,0 +1,63 @@
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
import spaces

torch.cuda.empty_cache()
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

model_name = "syubraj/MedicalChat-Phi-3.5-mini-instruct"
try:
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    model.to(device)  # move the model to the same device the tokenized inputs are sent to below
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    print("Model and Tokenizer loaded successfully.")
except Exception as e:
    raise ValueError(f"Error loading Model and Tokenizer: {e}")

@spaces.GPU
def generate_response(user_query: str, system_message: str = None, max_length: int = 1024) -> str:
    """
    Generates a response for the given user query.

    :param user_query: The user's input message.
    :param system_message: Custom system instruction (optional, defaults to a medical-assistant prompt).
    :param max_length: Maximum total sequence length (prompt plus generated tokens).
    :return: Generated assistant response.
    """
    if not user_query.strip():
        return "Error: User query cannot be empty."

    if system_message is None:
        system_message = ("You are a trusted AI-powered medical assistant. "
                          "Analyze patient queries carefully and provide accurate, professional, and empathetic responses. "
                          "Prioritize patient safety, adhere to medical best practices, and recommend consulting a healthcare provider when necessary.")

    # Phi-3.5-style chat markers: system, user, then an open assistant turn for the model to complete
    prompt = f"<|system|> {system_message} <|end|> <|user|> {user_query} <|end|> <|assistant|>"
    try:
        inputs = tokenizer(prompt, return_tensors="pt").to(device)
        outputs = model.generate(**inputs, max_length=max_length)
        # Decode, then keep only the text of the assistant turn
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("<|assistant|>")[-1].strip().split("<|end|>")[0].strip()
    except Exception as e:
        return f"Error generating response: {e}"

# Gradio interface
def chat_interface(user_query, system_message=None):
    # Treat an empty textbox as "no custom system message" so the default prompt applies
    response = generate_response(user_query, system_message or None)
    return response

with gr.Blocks() as demo:
    gr.Markdown("# Medical Chatbot")
    gr.Markdown("Ask your medical questions, and the AI will provide professional responses.")

    with gr.Row():
        user_query = gr.Textbox(label="Your Query", placeholder="Enter your question here...", lines=3)
        system_message = gr.Textbox(label="System Message (Optional)", placeholder="Custom system instruction...", lines=3)

    submit_button = gr.Button("Submit")
    output = gr.Textbox(label="Assistant Response", lines=5)

    submit_button.click(chat_interface, inputs=[user_query, system_message], outputs=output)

demo.launch()
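The prompt string above hard-codes the Phi-3.5 chat markers and caps generation with max_length, which counts the prompt tokens as well as the reply. A minimal alternative sketch for the prompt-building and generation lines inside generate_response, assuming the checkpoint ships a chat template in its tokenizer config (the base Phi-3.5-mini-instruct does; this fine-tune is not verified here), builds the prompt via the tokenizer and bounds only the newly generated tokens:

# Drop-in variant of the prompt/generation lines inside generate_response.
# Assumes tokenizer, model, device, system_message, and user_query are the
# objects already defined in app.py above.
messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": user_query},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=512)
# Decode only the tokens produced after the prompt
response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

Because apply_chat_template reuses whatever template the tokenizer carries, this avoids drift between hand-written markers and the format the model actually saw during fine-tuning.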
requirements.txt
ADDED
@@ -0,0 +1,7 @@
transformers==4.48.2
peft==0.14.0
bitsandbytes==0.45.1
gradio==5.15.0
accelerate==1.3.0
torch==2.4.0
spaces==0.32.0
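For local testing (assuming the pinned versions resolve on your platform; the spaces.GPU decorator is designed to have no effect outside of Hugging Face Spaces, and the script falls back to CPU when no GPU is present):

pip install -r requirements.txt
python app.py  # Gradio serves the UI on http://localhost:7860 by default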