Blaiseboy committed on
Commit c1248ca · verified · 1 Parent(s): eda861e

Delete app.py

Files changed (1)
  1. app.py +0 -700
app.py DELETED
@@ -1,700 +0,0 @@
- # BioGPT Medical Chatbot with Gradio Interface - HUGGING FACE SPACES VERSION
-
- import gradio as gr
- import torch
- import warnings
- import numpy as np
- import os
- import re
- import time
- from datetime import datetime
- from typing import List, Dict, Optional, Tuple
- import json
-
- # Install required packages if not already installed
- try:
-     from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-     from sentence_transformers import SentenceTransformer
-     import faiss
- except ImportError:
-     print("Installing required packages...")
-     import subprocess
-     import sys
-
-     packages = [
-         "transformers>=4.21.0",
-         "torch>=1.12.0",
-         "sentence-transformers",
-         "faiss-cpu",
-         "accelerate",
-         "bitsandbytes",
-         "datasets",
-         "numpy",
-         "sacremoses",
-         "scipy"
-     ]
-
-     for package in packages:
-         try:
-             subprocess.check_call([sys.executable, "-m", "pip", "install", package])
-         except Exception as e:
-             print(f"Failed to install {package}: {e}")
-
-     from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-     from sentence_transformers import SentenceTransformer
-     import faiss
-
- # Suppress warnings
- warnings.filterwarnings('ignore')
-
- class GradioBioGPTChatbot:
-     def __init__(self, use_gpu=False, use_8bit=False):  # Default to CPU for HF Spaces
-         """Initialize BioGPT chatbot for Gradio deployment"""
-         self.device = "cuda" if torch.cuda.is_available() and use_gpu else "cpu"
-         self.use_8bit = use_8bit and torch.cuda.is_available()
-
-         print(f"🔧 Initializing on device: {self.device}")
-
-         # Initialize components with error handling
-         try:
-             self.setup_embeddings()
-             self.setup_faiss_index()
-             self.setup_biogpt()
-         except Exception as e:
-             print(f"❌ Initialization error: {e}")
-             self.model = None
-             self.tokenizer = None
-             self.embedding_model = None
-
-         # Conversation tracking
-         self.conversation_history = []
-         self.knowledge_chunks = []
-         self.is_data_loaded = False
-
-     def setup_embeddings(self):
-         """Setup medical-optimized embeddings with error handling"""
-         try:
-             print("🔄 Loading embedding model...")
-             self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
-             self.embedding_dim = self.embedding_model.get_sentence_embedding_dimension()
-             self.use_embeddings = True
-             print("✅ Embeddings loaded successfully")
-         except Exception as e:
-             print(f"❌ Embeddings setup failed: {e}")
-             self.embedding_model = None
-             self.embedding_dim = 384
-             self.use_embeddings = False
-
-     def setup_faiss_index(self):
-         """Setup FAISS for vector search with error handling"""
-         try:
-             print("🔄 Setting up FAISS index...")
-             # Inner-product index; embeddings would need L2 normalization for true cosine similarity
-             self.faiss_index = faiss.IndexFlatIP(self.embedding_dim)
-             self.faiss_ready = True
-             print("✅ FAISS index ready")
-         except Exception as e:
-             print(f"❌ FAISS setup failed: {e}")
-             self.faiss_index = None
-             self.faiss_ready = False
-
-     def setup_biogpt(self):
-         """Setup BioGPT model with optimizations and fallbacks"""
-         print("🔄 Loading BioGPT model...")
-
-         # Try BioGPT first, with fallbacks
-         models_to_try = [
-             "microsoft/BioGPT",           # Preferred biomedical model
-             "microsoft/DialoGPT-medium",  # Fallback 1
-             "microsoft/DialoGPT-small",   # Fallback 2
-             "gpt2"                        # Final fallback
-         ]
-
-         for model_name in models_to_try:
-             try:
-                 print(f"🔄 Trying model: {model_name}")
-
-                 # Load tokenizer
-                 self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-                 if self.tokenizer.pad_token is None:
-                     self.tokenizer.pad_token = self.tokenizer.eos_token
-
-                 # Load model with minimal config for HF Spaces
-                 if self.device == "cuda" and self.use_8bit:
-                     quantization_config = BitsAndBytesConfig(
-                         load_in_8bit=True,
-                         llm_int8_threshold=6.0,
-                     )
-                 else:
-                     quantization_config = None
-
-                 self.model = AutoModelForCausalLM.from_pretrained(
-                     model_name,
-                     quantization_config=quantization_config,
-                     torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
-                     device_map="auto" if self.device == "cuda" else None,
-                     trust_remote_code=True,
-                     low_cpu_mem_usage=True  # Important for HF Spaces
-                 )
-
-                 if self.device == "cpu":
-                     self.model = self.model.to(self.device)
-
-                 print(f"✅ Successfully loaded: {model_name}")
-                 break
-
-             except Exception as e:
-                 print(f"❌ Failed to load {model_name}: {e}")
-                 continue
-         else:
-             print("❌ All models failed to load")
-             self.model = None
-             self.tokenizer = None
-
-     def create_medical_chunks(self, text: str, chunk_size: int = 300) -> List[Dict]:
-         """Create medically-optimized text chunks with smaller size for efficiency"""
-         chunks = []
-
-         # Split by medical sections first
-         medical_sections = self.split_by_medical_sections(text)
-
-         chunk_id = 0
-         for section in medical_sections:
-             if len(section.split()) > chunk_size:
-                 # Split large sections by sentences
-                 sentences = re.split(r'[.!?]+', section)
-                 current_chunk = ""
-
-                 for sentence in sentences:
-                     sentence = sentence.strip()
-                     if not sentence:
-                         continue
-
-                     if len(current_chunk.split()) + len(sentence.split()) < chunk_size:
-                         current_chunk += sentence + ". "
-                     else:
-                         if current_chunk.strip():
-                             chunks.append({
-                                 'id': chunk_id,
-                                 'text': current_chunk.strip(),
-                                 'medical_focus': self.identify_medical_focus(current_chunk)
-                             })
-                             chunk_id += 1
-                         current_chunk = sentence + ". "
-
-                 if current_chunk.strip():
-                     chunks.append({
-                         'id': chunk_id,
-                         'text': current_chunk.strip(),
-                         'medical_focus': self.identify_medical_focus(current_chunk)
-                     })
-                     chunk_id += 1
-             else:
-                 if section.strip():  # Don't add empty sections
-                     chunks.append({
-                         'id': chunk_id,
-                         'text': section,
-                         'medical_focus': self.identify_medical_focus(section)
-                     })
-                     chunk_id += 1
-
-         return chunks
-
-     def split_by_medical_sections(self, text: str) -> List[str]:
-         """Split text by medical sections"""
-         section_patterns = [
-             r'\n\s*(?:SYMPTOMS?|TREATMENT|DIAGNOSIS|CAUSES?|PREVENTION|MANAGEMENT).*?\n',
-             r'\n\s*\d+\.\s+',
-             r'\n\n+'
-         ]
-
-         sections = [text]
-         for pattern in section_patterns:
-             new_sections = []
-             for section in sections:
-                 splits = re.split(pattern, section, flags=re.IGNORECASE)
-                 new_sections.extend([s.strip() for s in splits if len(s.strip()) > 50])  # Reduced minimum length
-             sections = new_sections
-
-         return sections
-
-     def identify_medical_focus(self, text: str) -> str:
-         """Identify the medical focus of a text chunk"""
-         text_lower = text.lower()
-
-         categories = {
-             'pediatric_symptoms': ['fever', 'cough', 'rash', 'vomiting', 'diarrhea', 'child', 'baby', 'infant'],
-             'treatments': ['treatment', 'therapy', 'medication', 'antibiotics', 'medicine'],
-             'diagnosis': ['diagnosis', 'diagnostic', 'symptoms', 'signs', 'condition'],
-             'emergency': ['emergency', 'urgent', 'serious', 'hospital', 'call doctor'],
-             'prevention': ['prevention', 'vaccine', 'immunization', 'avoid', 'prevent']
-         }
-
-         for category, keywords in categories.items():
-             if any(keyword in text_lower for keyword in keywords):
-                 return category
-
-         return 'general_medical'
-
-     def load_medical_data_from_file(self, file_path: str) -> Tuple[str, bool]:
-         """Load medical data from uploaded file with better error handling"""
-         if not file_path or not os.path.exists(file_path):
-             return "❌ No file uploaded or file not found.", False
-
-         try:
-             print(f"🔄 Processing file: {file_path}")
-
-             # Read file with encoding detection
-             encodings_to_try = ['utf-8', 'utf-8-sig', 'latin-1', 'cp1252']
-             text = None
-
-             for encoding in encodings_to_try:
-                 try:
-                     with open(file_path, 'r', encoding=encoding) as f:
-                         text = f.read()
-                     print(f"✅ File read successfully with {encoding} encoding")
-                     break
-                 except UnicodeDecodeError:
-                     continue
-
-             if text is None:
-                 return "❌ Could not read file. Please ensure it's a valid text file.", False
-
-             if len(text.strip()) < 100:
-                 return "❌ File appears to be too short or empty. Please upload a substantial medical text.", False
-
-             # Create chunks
-             print("🔄 Creating medical chunks...")
-             chunks = self.create_medical_chunks(text)
-
-             if not chunks:
-                 return "❌ No valid medical content found in the file.", False
-
-             self.knowledge_chunks = chunks
-             print(f"✅ Created {len(chunks)} chunks")
-
-             # Generate embeddings if available
-             if self.use_embeddings and self.embedding_model and self.faiss_ready:
-                 print("🔄 Generating embeddings...")
-                 success = self.generate_embeddings_and_index(chunks)
-                 if success:
-                     self.is_data_loaded = True
-                     return f"✅ Medical data loaded successfully! {len(chunks)} chunks processed with vector search.", True
-
-             self.is_data_loaded = True
-             return f"✅ Medical data loaded successfully! {len(chunks)} chunks processed (keyword search mode).", True
-
-         except Exception as e:
-             print(f"❌ Error processing file: {e}")
-             return f"❌ Error loading file: {str(e)}", False
-
-     def generate_embeddings_and_index(self, chunks: List[Dict]) -> bool:
-         """Generate embeddings and add to FAISS index with error handling"""
-         try:
-             print("🔄 Generating embeddings...")
-             texts = [chunk['text'] for chunk in chunks]
-
-             # Process in batches to avoid memory issues
-             batch_size = 32
-             all_embeddings = []
-
-             for i in range(0, len(texts), batch_size):
-                 batch_texts = texts[i:i+batch_size]
-                 batch_embeddings = self.embedding_model.encode(batch_texts, show_progress_bar=False)
-                 all_embeddings.append(batch_embeddings)
-
-             embeddings = np.vstack(all_embeddings)
-             self.faiss_index.add(embeddings.astype('float32'))
-             print(f"✅ Added {len(embeddings)} embeddings to FAISS index")
-             return True
-
-         except Exception as e:
-             print(f"❌ Embedding generation failed: {e}")
-             return False
-
-     def retrieve_medical_context(self, query: str, n_results: int = 3) -> List[str]:
-         """Retrieve relevant medical context with fallback"""
-         if not self.knowledge_chunks:
-             return []
-
-         if self.use_embeddings and self.embedding_model and self.faiss_ready:
-             try:
-                 query_embedding = self.embedding_model.encode([query])
-                 distances, indices = self.faiss_index.search(query_embedding.astype('float32'), n_results)
-                 context_chunks = []
-                 for i in indices[0]:
-                     if i != -1 and i < len(self.knowledge_chunks):
-                         context_chunks.append(self.knowledge_chunks[i]['text'])
-
-                 if context_chunks:
-                     return context_chunks
-             except Exception as e:
-                 print(f"❌ Embedding search failed: {e}")
-
-         # Fallback to keyword search
-         return self.keyword_search_medical(query, n_results)
-
-     def keyword_search_medical(self, query: str, n_results: int) -> List[str]:
-         """Medical-focused keyword search"""
-         if not self.knowledge_chunks:
-             return []
-
-         query_words = set(query.lower().split())
-         chunk_scores = []
-
-         for chunk_info in self.knowledge_chunks:
-             chunk_text = chunk_info['text']
-             chunk_words = set(chunk_text.lower().split())
-
-             word_overlap = len(query_words.intersection(chunk_words))
-             base_score = word_overlap / len(query_words) if query_words else 0
-
-             # Boost medical content
-             medical_boost = 0
-             if chunk_info.get('medical_focus') in ['pediatric_symptoms', 'treatments', 'diagnosis']:
-                 medical_boost = 0.3
-
-             final_score = base_score + medical_boost
-
-             if final_score > 0:
-                 chunk_scores.append((final_score, chunk_text))
-
-         chunk_scores.sort(reverse=True)
-         return [chunk for _, chunk in chunk_scores[:n_results]]
-
-     def generate_biogpt_response(self, context: str, query: str) -> str:
-         """Generate medical response using loaded model"""
-         if not self.model or not self.tokenizer:
-             return "Medical AI model is not available. Using fallback response based on retrieved context."
-
-         try:
-             # Simplified prompt for better compatibility
-             prompt = f"Context: {context[:600]}\n\nQuestion: {query}\n\nAnswer:"
-
-             inputs = self.tokenizer(
-                 prompt,
-                 return_tensors="pt",
-                 truncation=True,
-                 max_length=512,  # Reduced for efficiency
-                 padding=True
-             )
-
-             if self.device == "cuda":
-                 inputs = {k: v.to(self.device) for k, v in inputs.items()}
-
-             with torch.no_grad():
-                 outputs = self.model.generate(
-                     **inputs,
-                     max_new_tokens=100,  # Reduced for efficiency
-                     do_sample=True,
-                     temperature=0.7,
-                     top_p=0.9,
-                     pad_token_id=self.tokenizer.eos_token_id,
-                     repetition_penalty=1.1,
-                     no_repeat_ngram_size=3
-                 )
-
-             full_response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-             if "Answer:" in full_response:
-                 generated_response = full_response.split("Answer:")[-1].strip()
-             else:
-                 generated_response = full_response[len(prompt):].strip()
-
-             return self.clean_medical_response(generated_response) if generated_response else self.fallback_response(context, query)
-
-         except Exception as e:
-             print(f"❌ Generation failed: {e}")
-             return self.fallback_response(context, query)
-
-     def clean_medical_response(self, response: str) -> str:
-         """Clean and format medical response"""
-         if not response:
-             return "I couldn't generate a specific response. Please consult a healthcare professional."
-
-         # Remove incomplete sentences and clean up
-         sentences = re.split(r'[.!?]+', response)
-         clean_sentences = []
-
-         for sentence in sentences:
-             sentence = sentence.strip()
-             if len(sentence) > 15 and not sentence.endswith(('and', 'or', 'but', 'however', 'the', 'a', 'an')):
-                 clean_sentences.append(sentence)
-                 if len(clean_sentences) >= 2:  # Limit to 2 sentences for clarity
-                     break
-
-         if clean_sentences:
-             cleaned = '. '.join(clean_sentences) + '.'
-         else:
-             cleaned = response[:150] + '...' if len(response) > 150 else response
-
-         return cleaned
-
-     def fallback_response(self, context: str, query: str) -> str:
-         """Fallback response when model generation fails"""
-         if not context:
-             return "I don't have specific information about this topic in my medical database. Please consult with a healthcare professional."
-
-         # Extract most relevant sentences from context
-         sentences = [s.strip() for s in context.split('.') if len(s.strip()) > 20]
-
-         if sentences:
-             # Return first 1-2 most relevant sentences
-             response = sentences[0]
-             if len(sentences) > 1 and len(response) < 100:
-                 response += '. ' + sentences[1]
-             response += '.'
-         else:
-             response = context[:200] + '...' if len(context) > 200 else context
-
-         return response
-
-     def handle_conversational_interactions(self, query: str) -> Optional[str]:
-         """Handle conversational interactions"""
-         query_lower = query.lower().strip()
-
-         # Greetings
-         if query_lower in ['hello', 'hi', 'hey', 'good morning', 'good afternoon']:
-             if not self.is_data_loaded:
-                 return "👋 Hello! I'm your medical AI assistant. Please upload your medical data file first, then ask me any health-related questions!"
-             else:
-                 return "👋 Hello again! I'm ready to help. Ask me any medical question related to your uploaded data."
-
-         # Thanks
-         if any(thanks in query_lower for thanks in ['thank you', 'thanks', 'thx', 'appreciate']):
-             return "🙏 You're welcome! Remember to always consult healthcare professionals for medical decisions. Feel free to ask more questions!"
-
-         # Goodbyes
-         if any(bye in query_lower for bye in ['bye', 'goodbye', 'see you', 'farewell']):
-             return "👋 Goodbye! Take care and stay healthy! 🏥"
-
-         # Help/About
-         if any(help_word in query_lower for help_word in ['help', 'what can you do', 'how do you work']):
-             return """🤖 **Medical AI Assistant**
-
- I can help with:
- • Medical information and conditions
- • Symptom understanding
- • Treatment information
- • When to seek medical care
-
- **How to use:**
- 1. Upload your medical data file
- 2. Ask specific medical questions
- 3. Get evidence-based information
-
- ⚠️ **Important:** I provide educational information only. Always consult healthcare professionals for medical advice."""
-
-         return None
-
-     def chat_interface(self, message: str, history: List[List[str]]) -> Tuple[str, List[List[str]]]:
-         """Main chat interface for Gradio"""
-         if not message.strip():
-             return "", history
-
-         # Check if data is loaded
-         if not self.is_data_loaded:
-             response = "⚠️ Please upload your medical data file first using the file upload above before asking questions."
-             history.append([message, response])
-             return "", history
-
-         # Handle conversational interactions
-         conversational_response = self.handle_conversational_interactions(message)
-         if conversational_response:
-             history.append([message, conversational_response])
-             return "", history
-
-         # Process medical query
-         try:
-             context = self.retrieve_medical_context(message)
-
-             if not context:
-                 response = "I don't have specific information about this topic in my medical database. Please consult with a healthcare professional for personalized medical advice."
-             else:
-                 main_context = '\n\n'.join(context)
-                 medical_response = self.generate_biogpt_response(main_context, message)
-                 response = f"🩺 **Medical Information:** {medical_response}\n\n⚠️ **Important:** This information is for educational purposes only. Always consult with qualified healthcare professionals for medical diagnosis, treatment, and personalized advice."
-
-             # Add to conversation history
-             self.conversation_history.append({
-                 'query': message,
-                 'response': response,
-                 'timestamp': datetime.now().isoformat()
-             })
-
-             history.append([message, response])
-             return "", history
-
-         except Exception as e:
-             print(f"❌ Chat interface error: {e}")
-             error_response = "I encountered an error processing your question. Please try again or consult a healthcare professional."
-             history.append([message, error_response])
-             return "", history
-
- # Initialize the chatbot with error handling
- print("🚀 Initializing Medical AI Assistant...")
- try:
-     chatbot = GradioBioGPTChatbot(use_gpu=False, use_8bit=False)  # CPU-optimized for HF Spaces
-     print("✅ Chatbot initialized successfully")
- except Exception as e:
-     print(f"❌ Chatbot initialization failed: {e}")
-     chatbot = None
-
- def upload_and_process_file(file):
-     """Handle file upload and processing"""
-     if file is None:
-         return "❌ No file uploaded."
-
-     if chatbot is None:
-         return "❌ Chatbot not initialized properly. Please refresh the page."
-
-     try:
-         message, success = chatbot.load_medical_data_from_file(file)
-         return message
-     except Exception as e:
-         return f"❌ Error processing file: {str(e)}"
-
- # Create Gradio Interface
- def create_gradio_interface():
-     """Create and launch Gradio interface"""
-
-     with gr.Blocks(
-         title="🏥 Medical AI Assistant",
-         theme=gr.themes.Soft(),
-         css="""
-         .gradio-container {
-             max-width: 1200px !important;
-         }
-         .chat-message {
-             border-radius: 10px !important;
-         }
-         """
-     ) as demo:
-
-         gr.HTML("""
-         <div style="text-align: center; padding: 20px;">
-             <h1>🏥 Medical AI Assistant</h1>
-             <p style="font-size: 18px; color: #666;">
-                 AI-powered medical information assistant
-             </p>
-             <p style="color: #888;">
-                 ⚠️ For educational purposes only. Always consult healthcare professionals for medical advice.
-             </p>
-         </div>
-         """)
-
-         with gr.Row():
-             with gr.Column(scale=1):
-                 gr.HTML("<h3>📁 Upload Medical Data</h3>")
-                 file_upload = gr.File(
-                     label="Upload Medical Text File (.txt)",
-                     file_types=[".txt"],
-                     type="filepath"
-                 )
-                 upload_status = gr.Textbox(
-                     label="Upload Status",
-                     value="📋 Please upload your medical data file to begin...",
-                     interactive=False,
-                     lines=3
-                 )
-
-                 gr.HTML("""
-                 <div style="margin-top: 20px; padding: 15px; background-color: #f0f8ff; border-radius: 10px;">
-                     <h4>💡 How to Use:</h4>
-                     <ol>
-                         <li>Upload your medical text file (.txt format)</li>
-                         <li>Wait for processing confirmation</li>
-                         <li>Start asking medical questions!</li>
-                     </ol>
-
-                     <h4>📝 Example Questions:</h4>
-                     <ul>
-                         <li>"What causes fever in children?"</li>
-                         <li>"How to treat a persistent cough?"</li>
-                         <li>"When should I call the doctor?"</li>
-                         <li>"Signs of dehydration in infants?"</li>
-                     </ul>
-                 </div>
-                 """)
-
-             with gr.Column(scale=2):
-                 gr.HTML("<h3>💬 Medical Consultation</h3>")
-                 chatbot_interface = gr.Chatbot(
-                     label="Medical AI Chat",
-                     height=500,
-                     bubble_full_width=False
-                 )
-
-                 msg_input = gr.Textbox(
-                     label="Your Medical Question",
-                     placeholder="Ask me about health topics, symptoms, treatments, or when to seek care...",
-                     lines=2
-                 )
-
-                 with gr.Row():
-                     send_btn = gr.Button("🩺 Send Question", variant="primary")
-                     clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")
-
-         # Event handlers with error handling
-         def safe_upload_handler(file):
-             try:
-                 return upload_and_process_file(file)
-             except Exception as e:
-                 return f"❌ Upload error: {str(e)}"
-
-         def safe_chat_handler(message, history):
-             try:
-                 if chatbot is None:
-                     return "", history + [[message, "❌ System error. Please refresh the page."]]
-                 return chatbot.chat_interface(message, history)
-             except Exception as e:
-                 return "", history + [[message, f"❌ Error: {str(e)}"]]
-
-         file_upload.change(
-             fn=safe_upload_handler,
-             inputs=[file_upload],
-             outputs=[upload_status]
-         )
-
-         msg_input.submit(
-             fn=safe_chat_handler,
-             inputs=[msg_input, chatbot_interface],
-             outputs=[msg_input, chatbot_interface]
-         )
-
-         send_btn.click(
-             fn=safe_chat_handler,
-             inputs=[msg_input, chatbot_interface],
-             outputs=[msg_input, chatbot_interface]
-         )
-
-         clear_btn.click(
-             fn=lambda: ([], ""),
-             outputs=[chatbot_interface, msg_input]
-         )
-
-         gr.HTML("""
-         <div style="text-align: center; margin-top: 30px; padding: 20px; background-color: #fff3cd; border-radius: 10px;">
-             <h4>⚠️ Medical Disclaimer</h4>
-             <p>This AI assistant provides educational medical information only and is not a substitute for professional medical advice, diagnosis, or treatment. Always seek the advice of qualified healthcare providers with questions about medical conditions.</p>
-         </div>
-         """)
-
-     return demo
-
- if __name__ == "__main__":
-     # Create and launch the Gradio interface
-     demo = create_gradio_interface()
-
-     print("🌐 Launching Gradio interface...")
-     print("📋 Upload your medical data file and start chatting!")
-
-     # Launch with HF Spaces optimized settings
-     # Note: show_tips and enable_queue are Gradio 3.x launch arguments (removed in Gradio 4+)
-     demo.launch(
-         share=False,
-         server_name="0.0.0.0",
-         server_port=7860,
-         show_error=True,
-         show_tips=False,
-         enable_queue=True,
-         max_threads=40
-     )