Axcel1 committed on
Commit 401ee04 · verified · 1 Parent(s): 42a8cd2

Update app.py

Files changed (1)
  1. app.py +426 -0
app.py CHANGED
@@ -0,0 +1,426 @@
+ import gradio as gr
+ import os
+ import threading
+ import time
+ from pathlib import Path
+ from huggingface_hub import login
+
+
+ # Try to import llama-cpp-python, fall back to instructions if not available
+ try:
+     from llama_cpp import Llama
+     LLAMA_CPP_AVAILABLE = True
+ except ImportError:
+     LLAMA_CPP_AVAILABLE = False
+     print("llama-cpp-python not installed. Please install it with: pip install llama-cpp-python")
+
+ hf_token = os.environ.get("HF_TOKEN")
+
+ # Log in only when a token is provided (avoids an interactive prompt when HF_TOKEN is unset)
+ if hf_token:
+     login(token=hf_token)
+
+
+ # Global variables for model
+ model = None
+ model_loaded = False
+
+ def find_gguf_file(directory="."):
+     """Find GGUF files in the specified directory"""
+     gguf_files = []
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             if file.endswith('.gguf'):
+                 gguf_files.append(os.path.join(root, file))
+     return gguf_files
+
+ def get_optimal_settings():
+     """Get optimal CPU threads and GPU layers automatically"""
+     # Auto-detect CPU threads (use all available cores)
+     n_threads = os.cpu_count()
+
+     # Auto-detect GPU layers (try to use GPU if available)
+     n_gpu_layers = 0
+     try:
+         # Try to detect if CUDA is available
+         import subprocess
+         result = subprocess.run(['nvidia-smi'], capture_output=True, text=True)
+         if result.returncode == 0:
+             # NVIDIA GPU detected, use more layers
+             n_gpu_layers = 35  # Good default for Llama-3-8B
+     except Exception:
+         # No GPU or CUDA not available
+         n_gpu_layers = 0
+
+     return n_threads, n_gpu_layers
+
+ def load_model_from_huggingface(repo_id, filename, n_ctx=2048):
+     """Load the model from a Hugging Face repository"""
+     global model, model_loaded
+
+     if not LLAMA_CPP_AVAILABLE:
+         return False, "llama-cpp-python not installed. Please install it with: pip install llama-cpp-python"
+
+     try:
+         print(f"Loading model from Hugging Face: {repo_id}/{filename}")
+
+         # Get optimal settings automatically
+         n_threads, n_gpu_layers = get_optimal_settings()
+         print(f"Auto-detected settings: {n_threads} CPU threads, {n_gpu_layers} GPU layers")
+
+         # Load model from Hugging Face with optimized settings
+         model = Llama.from_pretrained(
+             repo_id=repo_id,
+             filename=filename,
+             n_ctx=n_ctx,                # Context window (configurable)
+             n_threads=n_threads,        # CPU threads (auto-detected)
+             n_gpu_layers=n_gpu_layers,  # Number of layers to offload to GPU (auto-detected)
+             verbose=False,
+             chat_format="chatml",       # Use the ChatML chat format
+             n_batch=512,                # Batch size for prompt processing
+             use_mlock=True,             # Keep model in memory
+             use_mmap=True,              # Use memory mapping
+         )
+
+         model_loaded = True
+         print("Model loaded successfully!")
+         return True, f"✅ Model loaded successfully from {repo_id}/{filename}\n📊 Context: {n_ctx} tokens\n🖥️ CPU Threads: {n_threads}\n🎮 GPU Layers: {n_gpu_layers}"
+
+     except Exception as e:
+         model_loaded = False
+         error_msg = f"Error loading model: {str(e)}"
+         print(error_msg)
+         return False, f"❌ {error_msg}"
+
+ def load_model_from_gguf(gguf_path=None, n_ctx=2048):
+     """Load the model from a local GGUF file with automatic optimization"""
+     global model, model_loaded
+
+     if not LLAMA_CPP_AVAILABLE:
+         return False, "llama-cpp-python not installed. Please install it with: pip install llama-cpp-python"
+
+     try:
+         # If no path provided, try to find GGUF files
+         if gguf_path is None:
+             gguf_files = find_gguf_file()
+             if not gguf_files:
+                 return False, "No GGUF files found in the repository"
+             gguf_path = gguf_files[0]  # Use the first one found
+             print(f"Found GGUF file: {gguf_path}")
+
+         # Check if file exists
+         if not os.path.exists(gguf_path):
+             return False, f"GGUF file not found: {gguf_path}"
+
+         print(f"Loading model from: {gguf_path}")
+
+         # Get optimal settings automatically
+         n_threads, n_gpu_layers = get_optimal_settings()
+         print(f"Auto-detected settings: {n_threads} CPU threads, {n_gpu_layers} GPU layers")
+
+         # Load model with optimized settings
+         model = Llama(
+             model_path=gguf_path,
+             n_ctx=n_ctx,                # Context window (configurable)
+             n_threads=n_threads,        # CPU threads (auto-detected)
+             n_gpu_layers=n_gpu_layers,  # Number of layers to offload to GPU (auto-detected)
+             verbose=False,
+             chat_format="llama-3",      # Use Llama-3 chat format
+             n_batch=512,                # Batch size for prompt processing
+             use_mlock=True,             # Keep model in memory
+             use_mmap=True,              # Use memory mapping
+         )
+
+         model_loaded = True
+         print("Model loaded successfully!")
+         return True, f"✅ Model loaded successfully from {os.path.basename(gguf_path)}\n📊 Context: {n_ctx} tokens\n🖥️ CPU Threads: {n_threads}\n🎮 GPU Layers: {n_gpu_layers}"
+
+     except Exception as e:
+         model_loaded = False
+         error_msg = f"Error loading model: {str(e)}"
+         print(error_msg)
+         return False, f"❌ {error_msg}"
+
+ def generate_response_stream(message, history, max_tokens=512, temperature=0.7, top_p=0.9, repeat_penalty=1.1):
+     """Generate response from the model with streaming"""
+     global model, model_loaded
+
+     if not model_loaded or model is None:
+         yield "Error: Model not loaded. Please load the model first."
+         return
+
+     try:
+         # Format the conversation history for Llama-3
+         conversation = []
+
+         # Add conversation history
+         for human, assistant in history:
+             conversation.append({"role": "user", "content": human})
+             if assistant:  # Only add if assistant response exists
+                 conversation.append({"role": "assistant", "content": assistant})
+
+         # Add current message
+         conversation.append({"role": "user", "content": message})
+
+         # Generate response with streaming
+         response = ""
+         stream = model.create_chat_completion(
+             messages=conversation,
+             max_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             repeat_penalty=repeat_penalty,
+             stream=True,
+             stop=["<|eot_id|>", "<|end_of_text|>"]
+         )
+
+         for chunk in stream:
+             if chunk['choices'][0]['delta'].get('content'):
+                 new_text = chunk['choices'][0]['delta']['content']
+                 response += new_text
+                 yield response
+
+     except Exception as e:
+         yield f"Error generating response: {str(e)}"
+
+ def chat_interface(message, history, max_tokens, temperature, top_p, repeat_penalty):
+     """Main chat interface function"""
+     if not message.strip():
+         yield history, ""
+         return
+
+     if not model_loaded:
+         history.append((message, "Please load the model first using the 'Load Model' button."))
+         yield history, ""
+         return
+
+     # Add user message to history
+     history = history + [(message, "")]
+
+     # Generate response
+     for response in generate_response_stream(message, history[:-1], max_tokens, temperature, top_p, repeat_penalty):
+         history[-1] = (message, response)
+         yield history, ""
+
+ def clear_chat():
+     """Clear the chat history"""
+     return [], ""
+
+ def load_model_interface(source_type, gguf_path, repo_id, filename, context_size):
+     """Interface function to load model with configurable context size"""
+     if source_type == "Hugging Face":
+         success, message = load_model_from_huggingface(repo_id, filename, n_ctx=int(context_size))
+     else:  # Local file
+         success, message = load_model_from_gguf(gguf_path, n_ctx=int(context_size))
+     return message
+
+ def get_available_gguf_files():
+     """Get list of available GGUF files"""
+     gguf_files = find_gguf_file()
+     if not gguf_files:
+         return ["No GGUF files found"]
+     return [os.path.basename(f) for f in gguf_files]
+
+ # Create the Gradio interface
+ def create_interface():
+     # Get available GGUF files
+     gguf_files = find_gguf_file()
+     gguf_choices = [os.path.basename(f) for f in gguf_files] if gguf_files else ["No GGUF files found"]
+
+     with gr.Blocks(title="Llama-3-8B GGUF Chatbot", theme=gr.themes.Soft()) as demo:
+         gr.HTML("""
+             <h1 style="text-align: center; color: #2E86AB; margin-bottom: 30px;">
+                 🦙 MMed-Llama-Alpaca GGUF Chatbot
+             </h1>
+             <p style="text-align: center; color: #666; margin-bottom: 30px;">
+                 Chat with the MMed-Llama-Alpaca model (Q4_K_M quantized) for medical assistance!
+             </p>
+         """)
+
+         with gr.Row():
+             with gr.Column(scale=4):
+                 # Chat interface
+                 chatbot = gr.Chatbot(
+                     height=500,
+                     show_copy_button=True,
+                     bubble_full_width=False,
+                     show_label=False,
+                     placeholder="Model not loaded. Please load the model first to start chatting."
+                 )
+
+                 with gr.Row():
+                     msg = gr.Textbox(
+                         placeholder="Type your message here...",
+                         container=False,
+                         scale=7,
+                         show_label=False
+                     )
+                     submit_btn = gr.Button("Send", variant="primary", scale=1)
+                     clear_btn = gr.Button("Clear", variant="secondary", scale=1)
+
+             with gr.Column(scale=1):
+                 # Model loading section
+                 gr.HTML("<h3>🔧 Model Control</h3>")
+
+                 # Model source selection
+                 source_type = gr.Radio(
+                     choices=["Hugging Face", "Local File"],
+                     value="Hugging Face",
+                     label="Model Source",
+                     info="Choose where to load the model from"
+                 )
+
+                 # Hugging Face settings
+                 with gr.Group(visible=True) as hf_group:
+                     gr.HTML("<h4>🤗 Hugging Face Settings</h4>")
+                     repo_id = gr.Textbox(
+                         value="Axcel1/MMed-llama-alpaca-Q4_K_M-GGUF",
+                         label="Repository ID",
+                         info="e.g., username/repo-name"
+                     )
+                     filename = gr.Textbox(
+                         value="mmed-llama-alpaca-q4_k_m.gguf",
+                         label="Filename",
+                         info="GGUF filename in the repository"
+                     )
+
+                 # Local file settings
+                 with gr.Group(visible=False) as local_group:
+                     gr.HTML("<h4>📁 Local File Settings</h4>")
+                     if gguf_files:
+                         gguf_dropdown = gr.Dropdown(
+                             choices=gguf_choices,
+                             value=gguf_choices[0] if gguf_choices[0] != "No GGUF files found" else None,
+                             label="Select GGUF File",
+                             info="Choose which GGUF file to load"
+                         )
+                     else:
+                         gguf_dropdown = gr.Textbox(
+                             value="No GGUF files found in repository",
+                             label="GGUF File",
+                             interactive=False
+                         )
+
+                 load_btn = gr.Button("Load Model", variant="primary", size="lg")
+                 model_status = gr.Textbox(
+                     label="Status",
+                     value="Model not loaded. Configure settings and click 'Load Model'.\n⚙️ Auto-optimized: CPU threads & GPU layers auto-detected\n📝 Context size can be configured in Generation Settings",
+                     interactive=False,
+                     max_lines=5
+                 )
+
+                 # Generation parameters
+                 gr.HTML("<h3>⚙️ Generation Settings</h3>")
+
+                 # Context size (now as a slider)
+                 context_size = gr.Slider(
+                     minimum=512,
+                     maximum=8192,
+                     value=2048,
+                     step=256,
+                     label="Context Size",
+                     info="Token context window (requires model reload)"
+                 )
+
+                 max_tokens = gr.Slider(
+                     minimum=50,
+                     maximum=2048,
+                     value=512,
+                     step=50,
+                     label="Max Tokens",
+                     info="Maximum response length"
+                 )
+                 temperature = gr.Slider(
+                     minimum=0.1,
+                     maximum=2.0,
+                     value=0.7,
+                     step=0.1,
+                     label="Temperature",
+                     info="Creativity (higher = more creative)"
+                 )
+                 top_p = gr.Slider(
+                     minimum=0.1,
+                     maximum=1.0,
+                     value=0.9,
+                     step=0.1,
+                     label="Top-p",
+                     info="Nucleus sampling"
+                 )
+                 repeat_penalty = gr.Slider(
+                     minimum=1.0,
+                     maximum=1.5,
+                     value=1.1,
+                     step=0.1,
+                     label="Repeat Penalty",
+                     info="Penalize repetition"
+                 )
+
+                 # Information section
+                 gr.HTML("""
+                     <h3>ℹ️ About</h3>
+                     <p><strong>Format:</strong> GGUF (optimized)</p>
+                     <p><strong>Backend:</strong> llama-cpp-python</p>
+                     <p><strong>Features:</strong> CPU/GPU support, streaming</p>
+                     <p><strong>Memory:</strong> Optimized usage</p>
+                     <p><strong>Auto-Optimization:</strong> CPU threads & GPU layers detected automatically</p>
+                     <p><strong>Sources:</strong> Hugging Face Hub or Local Files</p>
+                 """)
+
+                 if not LLAMA_CPP_AVAILABLE:
+                     gr.HTML("""
+                         <div style="background-color: #ffebee; padding: 10px; border-radius: 5px; margin-top: 10px;">
+                             <p style="color: #c62828; margin: 0;"><strong>⚠️ Missing Dependency</strong></p>
+                             <p style="color: #c62828; margin: 0; font-size: 0.9em;">
+                                 Install llama-cpp-python:<br>
+                                 <code>pip install llama-cpp-python</code>
+                             </p>
+                         </div>
+                     """)
+
+         # Event handlers
+         def toggle_source_visibility(source_type):
+             if source_type == "Hugging Face":
+                 return gr.update(visible=True), gr.update(visible=False)
+             else:
+                 return gr.update(visible=False), gr.update(visible=True)
+
+         source_type.change(
+             toggle_source_visibility,
+             inputs=source_type,
+             outputs=[hf_group, local_group]
+         )
+
+         load_btn.click(
+             load_model_interface,
+             inputs=[source_type, gguf_dropdown, repo_id, filename, context_size],
+             outputs=model_status
+         )
+
+         submit_btn.click(
+             chat_interface,
+             inputs=[msg, chatbot, max_tokens, temperature, top_p, repeat_penalty],
+             outputs=[chatbot, msg]
+         )
+
+         msg.submit(
+             chat_interface,
+             inputs=[msg, chatbot, max_tokens, temperature, top_p, repeat_penalty],
+             outputs=[chatbot, msg]
+         )
+
+         clear_btn.click(
+             clear_chat,
+             outputs=[chatbot, msg]
+         )
+
+     return demo
+
+ if __name__ == "__main__":
+     # Create and launch the interface
+     demo = create_interface()
+
+     # Launch with appropriate settings for Hugging Face Spaces
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=False,
+         debug=False,
+         show_error=True,
+         quiet=False
+     )