Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
#!/usr/bin/env python3
|
2 |
"""
|
3 |
-
Enhanced Production-Ready Mamba Encoder Swarm Demo
|
4 |
-
Integrates pretrained Mamba weights
|
5 |
"""
|
6 |
|
7 |
import gradio as gr
|
@@ -12,11 +12,17 @@ import json
|
|
12 |
import logging
|
13 |
import os
|
14 |
import psutil
|
15 |
-
|
|
|
|
|
16 |
from datetime import datetime
|
17 |
-
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
|
18 |
from huggingface_hub import snapshot_download, hf_hub_download
|
19 |
|
|
|
|
|
|
|
|
|
20 |
# Setup comprehensive logging
|
21 |
logging.basicConfig(
|
22 |
level=logging.INFO,
|
@@ -29,7 +35,7 @@ logging.basicConfig(
|
|
29 |
logger = logging.getLogger(__name__)
|
30 |
|
31 |
class MambaWeightLoader:
|
32 |
-
"""Dynamic loader for pretrained Mamba weights"""
|
33 |
|
34 |
def __init__(self, model_name="state-spaces/mamba-130m"):
|
35 |
self.model_name = model_name
|
@@ -37,54 +43,111 @@ class MambaWeightLoader:
|
|
37 |
self.model = None
|
38 |
self.tokenizer = None
|
39 |
self.config = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
|
41 |
def download_and_load(self):
|
42 |
-
"""Download and load Mamba weights
|
43 |
try:
|
44 |
logger.info(f"π Loading pretrained model: {self.model_name}")
|
45 |
-
|
46 |
-
# Create cache directory
|
47 |
os.makedirs(self.cache_dir, exist_ok=True)
|
48 |
|
49 |
-
|
|
|
|
|
50 |
logger.info("π Loading tokenizer...")
|
51 |
try:
|
52 |
-
# Try loading the specific tokenizer first
|
53 |
self.tokenizer = AutoTokenizer.from_pretrained(
|
54 |
self.model_name,
|
55 |
cache_dir=self.cache_dir,
|
56 |
trust_remote_code=True,
|
57 |
-
use_fast=False
|
58 |
)
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
logger.info("Using GPT2 tokenizer as fallback...")
|
63 |
-
from transformers import GPT2Tokenizer
|
64 |
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
|
|
65 |
|
66 |
-
#
|
67 |
if self.tokenizer.pad_token is None:
|
68 |
if self.tokenizer.eos_token is not None:
|
69 |
self.tokenizer.pad_token = self.tokenizer.eos_token
|
70 |
else:
|
71 |
self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
|
72 |
|
73 |
-
# Load
|
74 |
logger.info("βοΈ Loading model configuration...")
|
75 |
self.config = AutoConfig.from_pretrained(
|
76 |
self.model_name,
|
77 |
cache_dir=self.cache_dir,
|
78 |
trust_remote_code=True
|
79 |
)
|
|
|
80 |
|
81 |
-
# Load model with
|
82 |
logger.info("π§ Loading model weights...")
|
83 |
-
|
84 |
-
# Determine optimal dtype and device settings
|
85 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
86 |
-
dtype = torch.float16 if device.type == "cuda" else torch.float32
|
87 |
-
|
88 |
try:
|
89 |
self.model = AutoModelForCausalLM.from_pretrained(
|
90 |
self.model_name,
|
@@ -92,39 +155,42 @@ class MambaWeightLoader:
|
|
92 |
cache_dir=self.cache_dir,
|
93 |
trust_remote_code=True,
|
94 |
torch_dtype=dtype,
|
95 |
-
device_map=
|
96 |
-
low_cpu_mem_usage=True
|
97 |
-
|
98 |
-
except Exception as model_error:
|
99 |
-
logger.error(f"Model loading failed: {model_error}")
|
100 |
-
# Try with basic settings
|
101 |
-
logger.info("Retrying with basic model loading settings...")
|
102 |
-
self.model = AutoModelForCausalLM.from_pretrained(
|
103 |
-
self.model_name,
|
104 |
-
trust_remote_code=True,
|
105 |
-
torch_dtype=dtype
|
106 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
107 |
|
108 |
-
#
|
109 |
-
if not
|
110 |
self.model.to(device)
|
111 |
-
|
112 |
self.model.eval()
|
113 |
|
114 |
-
# Log
|
115 |
num_params = sum(p.numel() for p in self.model.parameters())
|
116 |
-
logger.info(f"β
Model loaded
|
117 |
-
logger.info(f"π Parameters: {num_params:,} ({num_params/1e6:.1f}M)")
|
118 |
logger.info(f"π§ Device: {device}, dtype: {dtype}")
|
119 |
|
120 |
return True
|
121 |
|
122 |
except Exception as e:
|
123 |
-
logger.error(f"β Error loading
|
124 |
return False
|
125 |
|
126 |
def get_model_info(self):
|
127 |
-
"""Get model information"""
|
128 |
if self.model:
|
129 |
try:
|
130 |
num_params = sum(p.numel() for p in self.model.parameters())
|
@@ -141,14 +207,59 @@ class MambaWeightLoader:
|
|
141 |
"hidden_size": getattr(self.config, 'd_model', getattr(self.config, 'hidden_size', 'Unknown'))
|
142 |
}
|
143 |
except Exception as e:
|
144 |
-
logger.error(f"Error getting model info: {e}")
|
145 |
return {"error": str(e)}
|
146 |
return None
|
147 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
148 |
class MambaSwarmDemo:
|
149 |
-
"""Enhanced Production-ready Mamba Swarm Demo
|
150 |
|
151 |
def __init__(self, model_path: str = "./", fallback_mode: bool = False):
|
|
|
152 |
self.model = None
|
153 |
self.tokenizer = None
|
154 |
self.config = None
|
@@ -159,7 +270,10 @@ class MambaSwarmDemo:
|
|
159 |
self.pretrained_loader = None
|
160 |
self.using_pretrained = False
|
161 |
|
162 |
-
# Performance
|
|
|
|
|
|
|
163 |
self.stats = {
|
164 |
'total_requests': 0,
|
165 |
'successful_generations': 0,
|
@@ -168,253 +282,129 @@ class MambaSwarmDemo:
|
|
168 |
'total_tokens_generated': 0
|
169 |
}
|
170 |
|
171 |
-
# Domain
|
172 |
self.domain_keywords = {
|
173 |
-
'medical': ['medical', 'health', 'doctor', 'patient', 'disease', 'treatment'
|
174 |
-
'legal': ['legal', 'law', 'court', 'judge', 'contract', '
|
175 |
-
'code': ['code', 'python', 'programming', 'function', 'algorithm', 'software'
|
176 |
-
'science': ['science', 'research', 'experiment', 'theory', 'physics'
|
177 |
-
'creative': ['story', 'creative', 'write', 'novel', 'poem', 'character'
|
178 |
-
'business': ['business', 'marketing', 'strategy', 'finance', 'management'
|
179 |
-
'general': ['explain', 'what', 'how', 'why', 'describe', 'tell'
|
180 |
}
|
181 |
|
|
|
182 |
self._initialize_model()
|
183 |
-
logger.info(f"Demo initialized - Model
|
184 |
|
185 |
def _initialize_model(self):
|
186 |
-
"""Initialize model with
|
187 |
try:
|
188 |
-
logger.info("π Attempting to load model with priority: Pretrained -> Custom -> Fallback")
|
189 |
-
|
190 |
-
# Try to load pretrained model first (highest priority)
|
191 |
success = self._load_pretrained_model()
|
192 |
-
|
193 |
if not success:
|
194 |
-
logger.info("Pretrained loading failed, trying custom swarm model...")
|
195 |
success = self._load_custom_swarm_model()
|
196 |
-
|
197 |
if not success:
|
198 |
-
logger.info("All model loading attempts failed, enabling fallback mode")
|
199 |
self.fallback_mode = True
|
200 |
self._initialize_fallback_mode()
|
201 |
-
|
202 |
except Exception as e:
|
203 |
logger.error(f"Model initialization failed: {e}")
|
204 |
-
logger.info("Falling back to simulation mode")
|
205 |
self.fallback_mode = True
|
206 |
self._initialize_fallback_mode()
|
207 |
|
208 |
def _load_pretrained_model(self):
|
209 |
-
"""Load pretrained
|
210 |
try:
|
211 |
-
# Choose model based on available resources - using more compatible models
|
212 |
MODEL_OPTIONS = {
|
213 |
-
"small": "gpt2",
|
214 |
-
"medium": "microsoft/DialoGPT-medium",
|
215 |
-
"mamba-small": "state-spaces/mamba-130m",
|
216 |
-
"mamba-medium": "state-spaces/mamba-790m",
|
217 |
-
"mamba-large": "state-spaces/mamba-1.4b",
|
218 |
-
"mamba-xl": "state-spaces/mamba-2.8b", # Original Mamba XL
|
219 |
}
|
220 |
|
221 |
-
#
|
222 |
memory_gb = psutil.virtual_memory().total / (1024**3)
|
|
|
223 |
|
224 |
-
|
225 |
-
|
226 |
-
if memory_gb >= 32 and torch.cuda.is_available():
|
227 |
-
model_priority = ["mamba-xl", "mamba-large", "mamba-medium", "medium", "small"]
|
228 |
-
elif memory_gb >= 16 and torch.cuda.is_available():
|
229 |
-
model_priority = ["mamba-large", "mamba-medium", "medium", "small"]
|
230 |
elif memory_gb >= 8:
|
231 |
-
|
232 |
else:
|
233 |
-
|
234 |
|
235 |
-
logger.info(f"π― Model priority
|
236 |
|
237 |
-
|
238 |
-
for model_key in model_priority:
|
239 |
selected_model = MODEL_OPTIONS[model_key]
|
240 |
-
logger.info(f"π Trying
|
241 |
|
242 |
try:
|
243 |
-
# Initialize loader
|
244 |
self.pretrained_loader = MambaWeightLoader(selected_model)
|
245 |
-
|
246 |
-
# Download and load
|
247 |
if self.pretrained_loader.download_and_load():
|
248 |
self.model = self.pretrained_loader.model
|
249 |
self.tokenizer = self.pretrained_loader.tokenizer
|
250 |
self.config = self.pretrained_loader.config
|
251 |
self.model_loaded = True
|
252 |
self.using_pretrained = True
|
253 |
-
|
254 |
-
logger.info(f"β
Successfully loaded pretrained model: {selected_model}")
|
255 |
return True
|
256 |
-
|
257 |
-
|
258 |
-
continue
|
259 |
-
|
260 |
-
except Exception as model_error:
|
261 |
-
logger.warning(f"β Error with {selected_model}: {model_error}")
|
262 |
continue
|
263 |
|
264 |
-
logger.warning("β All pretrained models failed to load")
|
265 |
return False
|
266 |
-
|
267 |
except Exception as e:
|
268 |
-
logger.error(f"Pretrained
|
269 |
return False
|
270 |
|
271 |
def _load_custom_swarm_model(self):
|
272 |
-
"""Try to load custom swarm model
|
273 |
try:
|
274 |
-
logger.info("Attempting
|
275 |
-
|
276 |
-
|
277 |
-
model_class = None
|
278 |
-
|
279 |
-
try:
|
280 |
-
from modeling_mamba_swarm import MambaSwarmForCausalLM
|
281 |
-
model_class = MambaSwarmForCausalLM
|
282 |
-
logger.info("Found MambaSwarmForCausalLM")
|
283 |
-
except ImportError:
|
284 |
-
try:
|
285 |
-
from core.mamba_swarm_integration import MambaEncoderSwarmModel
|
286 |
-
model_class = MambaEncoderSwarmModel
|
287 |
-
logger.info("Found MambaEncoderSwarmModel")
|
288 |
-
except ImportError:
|
289 |
-
try:
|
290 |
-
from system.mambaSwarm import UnifiedMambaSwarm
|
291 |
-
# Use the unified swarm in native mode
|
292 |
-
swarm = UnifiedMambaSwarm(use_pretrained=False)
|
293 |
-
if hasattr(swarm, 'native_swarm_model') and swarm.native_swarm_model:
|
294 |
-
self.model = swarm.native_swarm_model
|
295 |
-
self.model_loaded = True
|
296 |
-
logger.info("Loaded native swarm model from UnifiedMambaSwarm")
|
297 |
-
return True
|
298 |
-
else:
|
299 |
-
raise ImportError("No native swarm model available")
|
300 |
-
except ImportError:
|
301 |
-
logger.warning("No custom swarm model found")
|
302 |
-
return False
|
303 |
-
|
304 |
-
if model_class is None:
|
305 |
-
return False
|
306 |
-
|
307 |
-
# Create configuration for custom model
|
308 |
-
try:
|
309 |
-
from modeling_mamba_swarm import MambaSwarmConfig
|
310 |
-
self.config = MambaSwarmConfig(
|
311 |
-
num_encoders=8,
|
312 |
-
max_mamba_encoders=100,
|
313 |
-
d_model=768,
|
314 |
-
vocab_size=50257,
|
315 |
-
max_sequence_length=2048
|
316 |
-
)
|
317 |
-
except ImportError:
|
318 |
-
# Fallback config
|
319 |
-
try:
|
320 |
-
from core.config import MambaConfig
|
321 |
-
self.config = MambaConfig()
|
322 |
-
self.config.num_encoders = 8
|
323 |
-
self.config.max_mamba_encoders = 100
|
324 |
-
except ImportError:
|
325 |
-
# Create minimal config
|
326 |
-
self.config = type('Config', (), {
|
327 |
-
'num_encoders': 8,
|
328 |
-
'max_mamba_encoders': 100,
|
329 |
-
'd_model': 768,
|
330 |
-
'vocab_size': 50257,
|
331 |
-
'max_sequence_length': 2048
|
332 |
-
})()
|
333 |
-
|
334 |
-
# Initialize custom model
|
335 |
-
if model_class.__name__ == 'MambaEncoderSwarmModel':
|
336 |
-
self.model = model_class(self.config, num_encoders=8)
|
337 |
-
else:
|
338 |
-
self.model = model_class(self.config)
|
339 |
-
|
340 |
-
# Create tokenizer
|
341 |
-
from transformers import GPT2Tokenizer
|
342 |
-
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
343 |
-
if self.tokenizer.pad_token is None:
|
344 |
-
self.tokenizer.pad_token = self.tokenizer.eos_token
|
345 |
-
|
346 |
-
self.model.to(self.device)
|
347 |
-
self.model.eval()
|
348 |
-
self.model_loaded = True
|
349 |
-
|
350 |
-
logger.info("β
Custom swarm model loaded successfully!")
|
351 |
-
return True
|
352 |
-
|
353 |
except Exception as e:
|
354 |
-
logger.error(f"Custom model
|
355 |
return False
|
356 |
|
357 |
def _initialize_fallback_mode(self):
|
358 |
-
"""Initialize
|
359 |
-
logger.info("Initializing
|
360 |
|
361 |
-
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
d_model=768,
|
368 |
-
vocab_size=50257,
|
369 |
-
max_sequence_length=2048
|
370 |
-
)
|
371 |
-
except ImportError:
|
372 |
-
# Fallback mock config
|
373 |
-
self.config = type('MockConfig', (), {
|
374 |
-
'max_mamba_encoders': 100,
|
375 |
-
'num_encoders': 8,
|
376 |
-
'd_model': 768,
|
377 |
-
'vocab_size': 50257,
|
378 |
-
'max_sequence_length': 2048
|
379 |
-
})()
|
380 |
|
381 |
-
# Create mock tokenizer
|
382 |
class MockTokenizer:
|
383 |
def __init__(self):
|
384 |
self.pad_token_id = 0
|
385 |
self.eos_token_id = 1
|
386 |
-
self.pad_token = "[PAD]"
|
387 |
-
self.eos_token = "[EOS]"
|
388 |
|
389 |
def encode(self, text, return_tensors=None):
|
390 |
-
tokens = text.split()
|
391 |
-
|
392 |
-
if return_tensors == "pt":
|
393 |
-
return torch.tensor([token_ids])
|
394 |
-
return token_ids
|
395 |
|
396 |
-
def decode(self,
|
397 |
-
return f"
|
398 |
-
|
399 |
-
self.tokenizer = MockTokenizer()
|
400 |
|
401 |
-
# Create mock model
|
402 |
class MockModel:
|
403 |
def __init__(self, config):
|
404 |
self.config = config
|
405 |
self.num_active_encoders = 5
|
406 |
|
407 |
-
def set_active_encoders(self, num):
|
408 |
-
self.num_active_encoders = min(num, self.config.max_mamba_encoders)
|
409 |
-
|
410 |
def eval(self):
|
411 |
pass
|
412 |
|
|
|
413 |
self.model = MockModel(self.config)
|
414 |
-
logger.info("
|
415 |
|
416 |
def _detect_domain(self, prompt: str) -> Tuple[str, float]:
|
417 |
-
"""Detect
|
418 |
prompt_lower = prompt.lower()
|
419 |
domain_scores = {}
|
420 |
|
@@ -431,527 +421,151 @@ class MambaSwarmDemo:
|
|
431 |
return 'general', 0.5
|
432 |
|
433 |
def _simulate_encoder_selection(self, prompt: str, num_encoders: int) -> Dict[str, Any]:
|
434 |
-
"""Simulate
|
435 |
domain, confidence = self._detect_domain(prompt)
|
436 |
|
437 |
-
# Domain-specific encoder ranges (simulated)
|
438 |
domain_ranges = {
|
439 |
-
'medical': (1, 20),
|
440 |
-
'
|
441 |
-
'code': (41, 60),
|
442 |
-
'science': (61, 80),
|
443 |
-
'creative': (81, 95),
|
444 |
-
'business': (96, 100),
|
445 |
'general': (1, 100)
|
446 |
}
|
447 |
|
448 |
start, end = domain_ranges.get(domain, (1, 100))
|
449 |
available_encoders = list(range(start, min(end + 1, 101)))
|
450 |
|
451 |
-
|
452 |
-
prompt_complexity = min(len(prompt.split()) / 10, 3.0)
|
453 |
-
optimal_count = min(max(int(num_encoders * (1 + prompt_complexity)), 3), 25)
|
454 |
-
|
455 |
if len(available_encoders) >= optimal_count:
|
456 |
selected = np.random.choice(available_encoders, size=optimal_count, replace=False)
|
457 |
else:
|
458 |
selected = available_encoders
|
459 |
|
460 |
-
selected_encoders = sorted(selected.tolist())
|
461 |
-
|
462 |
-
# Generate confidence scores
|
463 |
-
base_confidence = max(0.6, confidence)
|
464 |
-
confidence_scores = np.random.normal(base_confidence, 0.1, len(selected_encoders))
|
465 |
-
confidence_scores = np.clip(confidence_scores, 0.5, 0.98).tolist()
|
466 |
-
|
467 |
return {
|
468 |
-
'selected_encoders':
|
469 |
-
'confidence_scores':
|
470 |
'detected_domain': domain,
|
471 |
'domain_confidence': confidence,
|
472 |
-
'total_active': len(
|
473 |
}
|
474 |
|
475 |
-
def generate_text(self, prompt: str, max_length: int = 100, temperature: float = 0.7,
|
476 |
top_p: float = 0.9, num_encoders: int = 5, show_routing: bool = True) -> Tuple[str, str]:
|
477 |
-
"""Generate text with
|
478 |
start_time = time.time()
|
479 |
-
|
480 |
-
# Update statistics
|
481 |
self.stats['total_requests'] += 1
|
482 |
|
483 |
try:
|
484 |
if not prompt.strip():
|
485 |
return "Please enter a prompt.", ""
|
486 |
|
487 |
-
# Simulate routing decision
|
488 |
routing_info = self._simulate_encoder_selection(prompt, num_encoders)
|
489 |
|
490 |
if self.model_loaded and not self.fallback_mode:
|
491 |
-
|
492 |
-
response = self._generate_real(prompt, max_length, temperature, top_p, num_encoders)
|
493 |
else:
|
494 |
-
|
495 |
-
response = self._simulate_generation(prompt, routing_info, max_length)
|
496 |
|
497 |
-
#
|
498 |
generation_time = time.time() - start_time
|
499 |
estimated_tokens = len(response.split())
|
500 |
|
501 |
-
# Update statistics
|
502 |
self.stats['successful_generations'] += 1
|
503 |
self.stats['total_tokens_generated'] += estimated_tokens
|
|
|
504 |
|
505 |
-
#
|
506 |
-
total_successful = self.stats['successful_generations']
|
507 |
-
prev_avg = self.stats['avg_generation_time']
|
508 |
-
self.stats['avg_generation_time'] = (prev_avg * (total_successful - 1) + generation_time) / total_successful
|
509 |
-
|
510 |
-
# Generate routing display
|
511 |
routing_display = ""
|
512 |
if show_routing:
|
513 |
routing_display = self._create_routing_display(routing_info, generation_time, estimated_tokens)
|
514 |
|
515 |
-
logger.info(f"Generated {estimated_tokens} tokens in {generation_time:.2f}s")
|
516 |
return response, routing_display
|
517 |
|
518 |
except Exception as e:
|
519 |
self.stats['failed_generations'] += 1
|
520 |
-
error_msg = f"
|
521 |
logger.error(error_msg)
|
522 |
return error_msg, ""
|
523 |
|
524 |
-
def _generate_real(self, prompt: str, max_length: int, temperature: float,
|
525 |
-
|
526 |
-
"""Generate using real pretrained or custom model"""
|
527 |
try:
|
528 |
-
|
529 |
-
try:
|
530 |
-
inputs = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)
|
531 |
-
except Exception as tokenize_error:
|
532 |
-
logger.error(f"Tokenization error: {tokenize_error}")
|
533 |
-
return f"Tokenization error: {str(tokenize_error)}"
|
534 |
|
535 |
-
# Adjust number of active encoders (if supported)
|
536 |
-
if hasattr(self.model, 'set_active_encoders'):
|
537 |
-
max_encoders = getattr(self.config, 'max_mamba_encoders', 100)
|
538 |
-
self.model.set_active_encoders(min(num_encoders, max_encoders))
|
539 |
-
|
540 |
-
# Check if model has generate method
|
541 |
-
if not hasattr(self.model, 'generate'):
|
542 |
-
logger.warning("Model doesn't have generate method, using forward pass")
|
543 |
-
return self._generate_with_forward_pass(inputs, prompt, max_length, temperature)
|
544 |
-
|
545 |
-
# Generate with memory optimization and better error handling
|
546 |
with torch.no_grad():
|
547 |
-
|
548 |
-
|
549 |
-
|
550 |
-
|
551 |
-
|
552 |
-
|
553 |
-
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
use_cache=True,
|
558 |
-
attention_mask=torch.ones_like(inputs),
|
559 |
-
repetition_penalty=1.1, # Prevent repetition
|
560 |
-
no_repeat_ngram_size=3 # Prevent n-gram repetition
|
561 |
-
)
|
562 |
-
except Exception as gen_error:
|
563 |
-
logger.warning(f"Full generation failed: {gen_error}")
|
564 |
-
# Try simpler generation
|
565 |
-
try:
|
566 |
-
outputs = self.model.generate(
|
567 |
-
inputs,
|
568 |
-
max_new_tokens=min(max_length, 256),
|
569 |
-
temperature=0.7,
|
570 |
-
do_sample=True,
|
571 |
-
pad_token_id=getattr(self.tokenizer, 'pad_token_id', 0),
|
572 |
-
eos_token_id=getattr(self.tokenizer, 'eos_token_id', 1)
|
573 |
-
)
|
574 |
-
except Exception as simple_gen_error:
|
575 |
-
logger.warning(f"Simple generation failed: {simple_gen_error}")
|
576 |
-
# Try greedy decoding
|
577 |
-
outputs = self.model.generate(
|
578 |
-
inputs,
|
579 |
-
max_new_tokens=min(max_length, 128),
|
580 |
-
do_sample=False,
|
581 |
-
pad_token_id=getattr(self.tokenizer, 'pad_token_id', 0),
|
582 |
-
eos_token_id=getattr(self.tokenizer, 'eos_token_id', 1)
|
583 |
-
)
|
584 |
|
585 |
-
|
586 |
-
try:
|
587 |
-
generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
588 |
-
except Exception as decode_error:
|
589 |
-
logger.error(f"Decoding error: {decode_error}")
|
590 |
-
return f"Decoding error: {str(decode_error)}"
|
591 |
|
592 |
-
# Clean up the response
|
593 |
if generated_text.startswith(prompt):
|
594 |
response = generated_text[len(prompt):].strip()
|
595 |
else:
|
596 |
response = generated_text.strip()
|
597 |
|
598 |
-
|
599 |
-
if not response or len(response) < 10 or response.count(' ') < 3:
|
600 |
-
logger.warning("Generated response seems too short or invalid, using enhanced simulation")
|
601 |
-
return self._generate_enhanced_simulation(prompt, max_length)
|
602 |
-
|
603 |
-
return response if response else "Generated response was empty."
|
604 |
|
605 |
-
except torch.cuda.OutOfMemoryError:
|
606 |
-
logger.error("CUDA out of memory during generation")
|
607 |
-
return "Error: GPU memory insufficient. Try reducing max_length or switching to CPU mode."
|
608 |
except Exception as e:
|
609 |
logger.error(f"Real generation error: {e}")
|
610 |
-
return self.
|
611 |
-
|
612 |
-
def _generate_with_forward_pass(self, inputs: torch.Tensor, prompt: str, max_length: int, temperature: float) -> str:
|
613 |
-
"""Generate using forward pass when generate method is not available"""
|
614 |
-
try:
|
615 |
-
logger.info("Using forward pass generation")
|
616 |
-
|
617 |
-
generated_tokens = inputs.clone()
|
618 |
-
max_gen_length = min(max_length, 200)
|
619 |
-
|
620 |
-
for _ in range(max_gen_length):
|
621 |
-
with torch.no_grad():
|
622 |
-
outputs = self.model(generated_tokens)
|
623 |
-
|
624 |
-
if hasattr(outputs, 'logits'):
|
625 |
-
logits = outputs.logits
|
626 |
-
else:
|
627 |
-
logits = outputs
|
628 |
-
|
629 |
-
# Get next token probabilities
|
630 |
-
next_token_logits = logits[:, -1, :] / max(temperature, 0.1)
|
631 |
-
next_token_probs = torch.softmax(next_token_logits, dim=-1)
|
632 |
-
|
633 |
-
# Sample next token
|
634 |
-
next_token = torch.multinomial(next_token_probs, num_samples=1)
|
635 |
-
|
636 |
-
# Check for EOS token
|
637 |
-
if next_token.item() == getattr(self.tokenizer, 'eos_token_id', 1):
|
638 |
-
break
|
639 |
-
|
640 |
-
# Append to sequence
|
641 |
-
generated_tokens = torch.cat([generated_tokens, next_token], dim=1)
|
642 |
-
|
643 |
-
# Decode the generated sequence
|
644 |
-
generated_text = self.tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
|
645 |
-
response = generated_text[len(prompt):].strip()
|
646 |
-
|
647 |
-
return response if response else self._generate_enhanced_simulation(prompt, max_length)
|
648 |
-
|
649 |
-
except Exception as e:
|
650 |
-
logger.error(f"Forward pass generation error: {e}")
|
651 |
-
return self._generate_enhanced_simulation(prompt, max_length)
|
652 |
|
653 |
-
def
|
654 |
-
"""
|
655 |
-
logger.info("Using enhanced simulation mode")
|
656 |
-
|
657 |
-
domain, confidence = self._detect_domain(prompt)
|
658 |
-
|
659 |
-
# More sophisticated domain-specific responses
|
660 |
if domain == 'code':
|
661 |
return f"""Here's a solution for your programming request:
|
662 |
|
663 |
```python
|
664 |
-
def
|
665 |
-
|
666 |
-
Implementation based on your requirements: {prompt[:100]}...
|
667 |
-
\"\"\"
|
668 |
try:
|
669 |
-
#
|
670 |
data = process_input()
|
671 |
|
672 |
-
# Core logic
|
673 |
result = perform_operation(data)
|
674 |
|
675 |
-
# Output formatting
|
676 |
-
return format_result(result)
|
677 |
-
|
678 |
-
except Exception as e:
|
679 |
-
print(f"Error occurred: {{e}}")
|
680 |
-
return None
|
681 |
-
|
682 |
-
def process_input():
|
683 |
-
# Process user input here
|
684 |
-
return processed_data
|
685 |
-
|
686 |
-
def perform_operation(data):
|
687 |
-
# Main operation logic
|
688 |
-
return operation_result
|
689 |
-
|
690 |
-
def format_result(result):
|
691 |
-
# Format and return result
|
692 |
-
return formatted_result
|
693 |
-
|
694 |
-
if __name__ == "__main__":
|
695 |
-
main()
|
696 |
-
```
|
697 |
-
|
698 |
-
This implementation includes proper error handling, modular structure, and follows Python best practices."""
|
699 |
-
|
700 |
-
elif domain == 'medical':
|
701 |
-
return f"""Regarding your medical inquiry about: {prompt[:100]}...
|
702 |
-
|
703 |
-
**Medical Overview:**
|
704 |
-
This topic relates to important health considerations that require professional medical evaluation.
|
705 |
-
|
706 |
-
**Key Medical Points:**
|
707 |
-
β’ Symptoms can vary significantly between individuals
|
708 |
-
β’ Proper medical history and examination are essential
|
709 |
-
β’ Diagnostic tests may be required for accurate assessment
|
710 |
-
β’ Treatment plans should be individualized based on specific circumstances
|
711 |
-
β’ Regular follow-up and monitoring may be necessary
|
712 |
-
|
713 |
-
**Risk Factors to Consider:**
|
714 |
-
β’ Age, gender, and genetic predisposition
|
715 |
-
β’ Existing medical conditions and medications
|
716 |
-
β’ Lifestyle factors and environmental exposures
|
717 |
-
β’ Previous medical history and family history
|
718 |
-
|
719 |
-
**When to Seek Medical Attention:**
|
720 |
-
β’ If symptoms persist or worsen
|
721 |
-
β’ If new concerning symptoms develop
|
722 |
-
β’ For routine screening and prevention
|
723 |
-
β’ When questions about treatment arise
|
724 |
-
|
725 |
-
**Important Disclaimer:** This information is for educational purposes only and should not replace professional medical advice. Please consult with qualified healthcare providers for proper diagnosis, treatment, and medical care specific to your situation."""
|
726 |
-
|
727 |
-
elif domain == 'science':
|
728 |
-
return f"""Scientific Analysis of: {prompt[:100]}...
|
729 |
-
|
730 |
-
**Scientific Overview:**
|
731 |
-
This topic involves complex scientific principles that can be understood through systematic analysis and evidence-based reasoning.
|
732 |
-
|
733 |
-
**Theoretical Framework:**
|
734 |
-
The underlying mechanisms involve interactions between multiple variables, governed by well-established scientific laws and emerging research findings.
|
735 |
-
|
736 |
-
**Key Scientific Principles:**
|
737 |
-
β’ Fundamental forces and interactions at play
|
738 |
-
β’ Thermodynamic and kinetic considerations
|
739 |
-
β’ Molecular and atomic-level processes
|
740 |
-
β’ Energy transfer and conservation laws
|
741 |
-
β’ Equilibrium states and dynamic systems
|
742 |
-
|
743 |
-
**Current Research Status:**
|
744 |
-
Recent peer-reviewed studies have advanced our understanding of these phenomena, with several breakthrough discoveries providing new insights into the mechanisms involved.
|
745 |
-
|
746 |
-
**Practical Applications:**
|
747 |
-
β’ Industrial and technological implementations
|
748 |
-
β’ Medical and pharmaceutical applications
|
749 |
-
β’ Environmental and sustainability implications
|
750 |
-
β’ Future research directions and potential developments
|
751 |
-
|
752 |
-
**Methodology Considerations:**
|
753 |
-
Scientific investigation of this topic requires controlled experimental conditions, precise measurement techniques, and statistical analysis to ensure reliable and reproducible results."""
|
754 |
-
|
755 |
-
elif domain == 'legal':
|
756 |
-
return f"""Legal Analysis regarding: {prompt[:100]}...
|
757 |
-
|
758 |
-
**Legal Framework:**
|
759 |
-
This matter involves various legal considerations that depend on jurisdiction, applicable statutes, and case law precedent.
|
760 |
-
|
761 |
-
**Key Legal Aspects:**
|
762 |
-
β’ Statutory requirements and regulatory compliance
|
763 |
-
β’ Common law principles and judicial precedent
|
764 |
-
β’ Constitutional considerations where applicable
|
765 |
-
β’ Procedural requirements and deadlines
|
766 |
-
β’ Rights and obligations of involved parties
|
767 |
-
|
768 |
-
**Jurisdictional Considerations:**
|
769 |
-
β’ Federal vs. state/provincial law applications
|
770 |
-
β’ International treaty obligations where relevant
|
771 |
-
β’ Cross-border enforcement mechanisms
|
772 |
-
β’ Conflict of laws principles
|
773 |
-
|
774 |
-
**Risk Assessment:**
|
775 |
-
β’ Potential legal exposure and liability
|
776 |
-
β’ Compliance requirements and penalties
|
777 |
-
β’ Litigation risks and dispute resolution options
|
778 |
-
β’ Insurance and indemnification considerations
|
779 |
-
|
780 |
-
**Recommended Actions:**
|
781 |
-
β’ Consult with qualified legal counsel
|
782 |
-
β’ Review relevant documentation and contracts
|
783 |
-
β’ Assess compliance with applicable regulations
|
784 |
-
β’ Consider alternative dispute resolution methods
|
785 |
-
|
786 |
-
**Legal Disclaimer:** This information is for general informational purposes only and does not constitute legal advice. Specific legal situations require consultation with qualified attorneys familiar with applicable law and jurisdiction."""
|
787 |
-
|
788 |
-
elif domain == 'business':
|
789 |
-
return f"""Business Strategy Analysis for: {prompt[:100]}...
|
790 |
-
|
791 |
-
**Executive Summary:**
|
792 |
-
This business challenge presents opportunities for strategic growth and operational optimization through data-driven decision making and market-focused initiatives.
|
793 |
-
|
794 |
-
**Market Analysis:**
|
795 |
-
β’ Current market size and growth trajectory
|
796 |
-
β’ Competitive landscape and positioning
|
797 |
-
β’ Customer segmentation and value propositions
|
798 |
-
β’ Industry trends and disruption factors
|
799 |
-
β’ Regulatory environment and compliance requirements
|
800 |
-
|
801 |
-
**Strategic Recommendations:**
|
802 |
-
|
803 |
-
*Short-term (0-6 months):*
|
804 |
-
β’ Immediate market positioning adjustments
|
805 |
-
β’ Resource allocation optimization
|
806 |
-
β’ Quick-win revenue opportunities
|
807 |
-
β’ Risk mitigation implementation
|
808 |
-
|
809 |
-
*Medium-term (6-18 months):*
|
810 |
-
β’ Strategic partnership development
|
811 |
-
β’ Product/service portfolio expansion
|
812 |
-
β’ Market penetration strategies
|
813 |
-
β’ Operational efficiency improvements
|
814 |
-
|
815 |
-
*Long-term (18+ months):*
|
816 |
-
β’ Innovation and R&D investments
|
817 |
-
β’ Market leadership positioning
|
818 |
-
β’ Scalability infrastructure development
|
819 |
-
β’ Sustainable competitive advantage building
|
820 |
-
|
821 |
-
**Financial Projections:**
|
822 |
-
Based on market analysis and conservative growth assumptions, implementing these strategies could result in significant ROI improvements and market share expansion.
|
823 |
-
|
824 |
-
**Implementation Roadmap:**
|
825 |
-
Phased approach with clear milestones, KPIs, and accountability measures to ensure successful execution and measurable results."""
|
826 |
-
|
827 |
-
elif domain == 'creative':
|
828 |
-
return f"""Creative Response to: {prompt[:50]}...
|
829 |
-
|
830 |
-
**The Story Unfolds**
|
831 |
-
|
832 |
-
In the realm where imagination meets reality, your creative vision takes shape. The narrative begins with a single moment of inspiration, growing into something far greater than the sum of its parts.
|
833 |
-
|
834 |
-
*Setting the Scene:*
|
835 |
-
The world around us shifts and transforms, revealing hidden layers of meaning and possibility. Each detail contributes to a larger tapestry of human experience, woven together by threads of emotion, memory, and hope.
|
836 |
-
|
837 |
-
*Character Development:*
|
838 |
-
Our protagonist faces the eternal question that defines all great stories: How do we find meaning in the midst of uncertainty? The journey ahead is fraught with challenges, but also filled with moments of profound discovery.
|
839 |
-
|
840 |
-
*The Central Conflict:*
|
841 |
-
Like all meaningful narratives, this story explores the tension between what is and what could be. The characters must navigate between their deepest fears and their highest aspirations, finding courage in unexpected places.
|
842 |
-
|
843 |
-
*Resolution and Growth:*
|
844 |
-
Through struggle and perseverance, the story reveals its deeper truth: that creativity itself is an act of courage, a willingness to venture into the unknown and bring back something meaningful for others to share.
|
845 |
-
|
846 |
-
*Themes Explored:*
|
847 |
-
β’ The power of imagination to transform reality
|
848 |
-
β’ The courage required to pursue creative vision
|
849 |
-
β’ The connection between individual expression and universal truth
|
850 |
-
β’ The role of art in making sense of human experience
|
851 |
-
|
852 |
-
The story continues to unfold, limited only by the boundaries of imagination itself."""
|
853 |
-
|
854 |
-
else: # general
|
855 |
-
return f"""Comprehensive Analysis of: {prompt[:100]}...
|
856 |
-
|
857 |
-
**Overview:**
|
858 |
-
Your inquiry touches on several important aspects that warrant careful consideration and analysis from multiple perspectives.
|
859 |
-
|
860 |
-
**Key Considerations:**
|
861 |
-
β’ Historical context and background information
|
862 |
-
β’ Current state of knowledge and understanding
|
863 |
-
β’ Multiple viewpoints and interpretations
|
864 |
-
β’ Practical implications and applications
|
865 |
-
β’ Future trends and potential developments
|
866 |
-
|
867 |
-
**Detailed Analysis:**
|
868 |
-
The topic involves complex interactions between various factors, each contributing to a nuanced understanding of the subject matter. Evidence-based reasoning suggests that successful approaches typically involve:
|
869 |
-
|
870 |
-
1. **Systematic Assessment** - Thorough evaluation of available information
|
871 |
-
2. **Critical Analysis** - Examination of assumptions and underlying principles
|
872 |
-
3. **Stakeholder Consideration** - Understanding impact on all affected parties
|
873 |
-
4. **Risk Evaluation** - Assessment of potential challenges and mitigation strategies
|
874 |
-
5. **Implementation Planning** - Practical steps for moving forward effectively
|
875 |
-
|
876 |
-
**Best Practices:**
|
877 |
-
β’ Maintain objectivity and evidence-based reasoning
|
878 |
-
β’ Consider multiple perspectives and potential outcomes
|
879 |
-
β’ Regular review and adjustment of approaches as needed
|
880 |
-
β’ Clear communication with all stakeholders involved
|
881 |
-
β’ Documentation of decisions and rationale for future reference
|
882 |
-
|
883 |
-
**Conclusion:**
|
884 |
-
This analysis provides a framework for understanding the key elements involved. Success typically requires combining theoretical knowledge with practical experience, while remaining adaptable to changing circumstances and new information."""
|
885 |
-
|
886 |
-
return response
|
887 |
-
|
888 |
-
def _simulate_generation(self, prompt: str, routing_info: Dict, max_length: int) -> str:
|
889 |
-
"""Generate sophisticated simulated responses"""
|
890 |
-
domain = routing_info['detected_domain']
|
891 |
-
|
892 |
-
# Enhanced domain-specific responses
|
893 |
-
if domain == 'code':
|
894 |
-
return f"""Here's a comprehensive solution for your request:
|
895 |
-
|
896 |
-
```python
|
897 |
-
def solution(input_data):
|
898 |
-
\"\"\"
|
899 |
-
Optimized implementation based on your requirements
|
900 |
-
\"\"\"
|
901 |
-
try:
|
902 |
-
# Input validation
|
903 |
-
if not input_data:
|
904 |
-
raise ValueError("Input cannot be empty")
|
905 |
-
|
906 |
-
# Process the data
|
907 |
-
result = process_input(input_data)
|
908 |
-
|
909 |
return result
|
910 |
except Exception as e:
|
911 |
print(f"Error: {{e}}")
|
912 |
return None
|
913 |
|
914 |
-
|
915 |
-
|
916 |
-
return processed_data
|
917 |
-
```
|
918 |
-
|
919 |
-
This solution includes error handling, input validation, and follows best practices for production code."""
|
920 |
-
|
921 |
elif domain == 'medical':
|
922 |
-
return f"""
|
923 |
|
924 |
-
**Overview:**
|
925 |
-
This topic involves several important medical considerations that should be evaluated by healthcare professionals.
|
926 |
|
927 |
**Key Points:**
|
928 |
-
β’ Symptoms
|
929 |
-
β’
|
930 |
-
β’ Treatment
|
931 |
-
β’ Regular monitoring may be
|
932 |
|
933 |
-
**
|
934 |
-
|
935 |
else:
|
936 |
return f"""**Response to: "{prompt[:50]}..."**
|
937 |
|
938 |
-
|
939 |
|
940 |
-
|
941 |
-
|
942 |
-
**Key insights:**
|
943 |
-
β’ The topic involves several interconnected factors
|
944 |
β’ Current understanding is based on established principles
|
945 |
-
β’ Practical applications may vary
|
946 |
β’ Further exploration could yield additional insights
|
947 |
|
948 |
-
**Domain
|
949 |
|
950 |
-
def _create_routing_display(self, routing_info: Dict, generation_time: float,
|
951 |
-
|
952 |
-
""
|
953 |
-
|
954 |
-
model_name = getattr(self.pretrained_loader, 'model_name', 'Custom/Simulation') if self.pretrained_loader else 'Custom/Simulation'
|
955 |
|
956 |
return f"""
|
957 |
## π§ Intelligent Routing Analysis
|
@@ -959,166 +573,74 @@ This is a comprehensive response that addresses your query with relevant informa
|
|
959 |
**π― Domain Detection:**
|
960 |
- **Primary Domain**: {routing_info['detected_domain'].title()}
|
961 |
- **Confidence**: {routing_info['domain_confidence']:.1%}
|
962 |
-
- **Specialization Level**: {'High' if routing_info['domain_confidence'] > 0.7 else 'Medium' if routing_info['domain_confidence'] > 0.4 else 'General'}
|
963 |
|
964 |
**β‘ Model Information:**
|
965 |
-
- **
|
966 |
-
- **
|
967 |
-
- **Active Encoders**: {routing_info['total_active']}/
|
968 |
- **Device**: {self.device}
|
969 |
|
970 |
-
|
971 |
-
{', '.join(map(str, routing_info['selected_encoders'][:15]))}{'...' if len(routing_info['selected_encoders']) > 15 else ''}
|
972 |
-
|
973 |
-
**π Performance Metrics:**
|
974 |
- **Generation Time**: {generation_time:.2f}s
|
975 |
-
- **
|
976 |
-
- **
|
977 |
- **Success Rate**: {(self.stats['successful_generations'] / max(self.stats['total_requests'], 1) * 100):.1f}%
|
978 |
|
979 |
-
|
980 |
-
{', '.join(
|
981 |
-
|
982 |
-
**π‘ Optimization Notes:**
|
983 |
-
- Encoder selection optimized for domain: {routing_info['detected_domain']}
|
984 |
-
- {'Pretrained weights from HuggingFace' if self.using_pretrained else 'Custom swarm implementation' if self.model_loaded and not self.fallback_mode else 'Simulation mode active'}
|
985 |
-
- Dynamic load balancing across {routing_info['total_active']} active encoders
|
986 |
"""
|
987 |
|
988 |
def get_model_info(self) -> str:
|
989 |
-
"""Get
|
990 |
-
if not self.model:
|
991 |
return "Model not initialized"
|
992 |
|
993 |
-
# Get system information
|
994 |
memory_info = psutil.virtual_memory()
|
995 |
gpu_info = "N/A"
|
996 |
if torch.cuda.is_available():
|
997 |
-
gpu_info = f"{torch.cuda.get_device_name(0)}
|
998 |
|
999 |
-
# Get pretrained model info if available
|
1000 |
pretrained_info = ""
|
1001 |
if self.pretrained_loader:
|
1002 |
model_info = self.pretrained_loader.get_model_info()
|
1003 |
if model_info and 'error' not in model_info:
|
1004 |
pretrained_info = f"""
|
1005 |
-
**π€
|
1006 |
-
- **
|
1007 |
- **Parameters**: {model_info['parameters']} ({model_info['parameters_millions']})
|
1008 |
-
- **
|
1009 |
-
- **Hidden Size**: {model_info['hidden_size']}
|
1010 |
-
- **Model Device**: {model_info['device']}
|
1011 |
-
- **Data Type**: {model_info['dtype']}
|
1012 |
"""
|
1013 |
|
1014 |
-
|
1015 |
-
status_text = f"Loaded {'with Pretrained Weights' if self.using_pretrained else 'with Custom Swarm'}" if self.model_loaded and not self.fallback_mode else "Simulation Mode"
|
1016 |
|
1017 |
return f"""
|
1018 |
-
**π€ Mamba Encoder Swarm
|
1019 |
|
1020 |
-
**
|
1021 |
-
- **Status**: {status_emoji} {status_text}
|
1022 |
-
- **Active Encoders**: {getattr(self.model, 'num_active_encoders', 'N/A')}
|
1023 |
-
- **Max Encoders**: {getattr(self.config, 'max_mamba_encoders', 100)}
|
1024 |
-
- **Model Dimension**: {getattr(self.config, 'd_model', getattr(self.config, 'hidden_size', 768))}
|
1025 |
-
- **Vocabulary Size**: {getattr(self.config, 'vocab_size', 50257):,}
|
1026 |
-
- **Max Sequence Length**: {getattr(self.config, 'max_sequence_length', 'N/A')}
|
1027 |
-
{pretrained_info}
|
1028 |
-
**System Information:**
|
1029 |
- **Device**: {self.device} {f'({gpu_info})' if gpu_info != 'N/A' else ''}
|
1030 |
-
- **RAM Usage**: {memory_info.percent:.1f}%
|
1031 |
-
|
1032 |
-
|
1033 |
-
**Performance Statistics:**
|
1034 |
- **Total Requests**: {self.stats['total_requests']}
|
1035 |
-
- **Successful**: {self.stats['successful_generations']}
|
1036 |
-
- **Failed**: {self.stats['failed_generations']}
|
1037 |
- **Success Rate**: {(self.stats['successful_generations'] / max(self.stats['total_requests'], 1) * 100):.1f}%
|
1038 |
-
- **
|
1039 |
-
- **Total Tokens Generated**: {self.stats['total_tokens_generated']:,}
|
1040 |
-
|
1041 |
-
**Mode**: {'π’ Pretrained Model Active' if self.using_pretrained else 'π΅ Custom Swarm Active' if self.model_loaded and not self.fallback_mode else 'π‘ Simulation Mode'}
|
1042 |
"""
|
1043 |
|
1044 |
-
def get_system_status(self) -> Dict[str, Any]:
|
1045 |
-
"""Get system status for monitoring"""
|
1046 |
-
return {
|
1047 |
-
'model_loaded': self.model_loaded,
|
1048 |
-
'using_pretrained': self.using_pretrained,
|
1049 |
-
'fallback_mode': self.fallback_mode,
|
1050 |
-
'device': str(self.device),
|
1051 |
-
'stats': self.stats.copy(),
|
1052 |
-
'timestamp': datetime.now().isoformat()
|
1053 |
-
}
|
1054 |
-
|
1055 |
def switch_model(self, model_size: str = "auto") -> str:
|
1056 |
-
"""Switch between
|
1057 |
if not self.using_pretrained:
|
1058 |
-
return "β Model switching only available
|
1059 |
|
1060 |
-
|
1061 |
-
|
1062 |
-
"small": "state-spaces/mamba-130m",
|
1063 |
-
"medium": "state-spaces/mamba-790m",
|
1064 |
-
"large": "state-spaces/mamba-1.4b",
|
1065 |
-
"xl": "state-spaces/mamba-2.8b"
|
1066 |
-
}
|
1067 |
-
|
1068 |
-
if model_size == "auto":
|
1069 |
-
# Auto-select based on memory
|
1070 |
-
memory_gb = psutil.virtual_memory().total / (1024**3)
|
1071 |
-
if memory_gb >= 32 and torch.cuda.is_available():
|
1072 |
-
model_size = "xl"
|
1073 |
-
elif memory_gb >= 16 and torch.cuda.is_available():
|
1074 |
-
model_size = "large"
|
1075 |
-
elif memory_gb >= 8:
|
1076 |
-
model_size = "medium"
|
1077 |
-
else:
|
1078 |
-
model_size = "small"
|
1079 |
-
|
1080 |
-
if model_size not in MODEL_OPTIONS:
|
1081 |
-
return f"β Invalid model size. Choose from: {list(MODEL_OPTIONS.keys())}"
|
1082 |
-
|
1083 |
-
selected_model = MODEL_OPTIONS[model_size]
|
1084 |
-
|
1085 |
-
# Check if already using this model
|
1086 |
-
if self.pretrained_loader and self.pretrained_loader.model_name == selected_model:
|
1087 |
-
return f"β
Already using {selected_model}"
|
1088 |
-
|
1089 |
-
logger.info(f"π Switching to model: {selected_model}")
|
1090 |
-
|
1091 |
-
# Clear current model
|
1092 |
-
if self.model:
|
1093 |
-
del self.model
|
1094 |
-
torch.cuda.empty_cache() if torch.cuda.is_available() else None
|
1095 |
-
|
1096 |
-
# Load new model
|
1097 |
-
self.pretrained_loader = MambaWeightLoader(selected_model)
|
1098 |
-
|
1099 |
-
if self.pretrained_loader.download_and_load():
|
1100 |
-
self.model = self.pretrained_loader.model
|
1101 |
-
self.tokenizer = self.pretrained_loader.tokenizer
|
1102 |
-
self.config = self.pretrained_loader.config
|
1103 |
-
|
1104 |
-
logger.info(f"β
Successfully switched to {selected_model}")
|
1105 |
-
return f"β
Successfully switched to {selected_model}"
|
1106 |
-
else:
|
1107 |
-
logger.error(f"β Failed to switch to {selected_model}")
|
1108 |
-
return f"β Failed to switch to {selected_model}"
|
1109 |
-
|
1110 |
-
except Exception as e:
|
1111 |
-
logger.error(f"Error switching model: {e}")
|
1112 |
-
return f"β Error switching model: {str(e)}"
|
1113 |
|
1114 |
def create_production_demo() -> gr.Blocks:
|
1115 |
-
"""Create production-ready Gradio interface
|
1116 |
|
1117 |
-
# Initialize demo with pretrained model capability
|
1118 |
try:
|
1119 |
demo_instance = MambaSwarmDemo(model_path="./", fallback_mode=False)
|
1120 |
except Exception as e:
|
1121 |
-
logger.warning(f"Primary
|
1122 |
demo_instance = MambaSwarmDemo(model_path="./", fallback_mode=True)
|
1123 |
|
1124 |
def generate_response(prompt, max_length, temperature, top_p, num_encoders, show_routing):
|
@@ -1127,172 +649,77 @@ def create_production_demo() -> gr.Blocks:
|
|
1127 |
def show_model_info():
|
1128 |
return demo_instance.get_model_info()
|
1129 |
|
1130 |
-
def refresh_model_info():
|
1131 |
-
return demo_instance.get_model_info()
|
1132 |
-
|
1133 |
-
def switch_model_size(model_size):
|
1134 |
-
result = demo_instance.switch_model(model_size)
|
1135 |
-
return result, demo_instance.get_model_info()
|
1136 |
-
|
1137 |
# Create interface
|
1138 |
with gr.Blocks(
|
1139 |
-
title="Mamba Encoder Swarm - Production Demo
|
1140 |
theme=gr.themes.Soft(),
|
1141 |
css="""
|
1142 |
-
.gradio-container {
|
1143 |
-
|
1144 |
-
|
1145 |
-
}
|
1146 |
-
.model-info {
|
1147 |
-
background-color: #f8f9fa;
|
1148 |
-
border-radius: 8px;
|
1149 |
-
padding: 15px;
|
1150 |
-
margin: 10px 0;
|
1151 |
-
}
|
1152 |
-
.routing-info {
|
1153 |
-
background-color: #e8f4fd;
|
1154 |
-
border-radius: 8px;
|
1155 |
-
padding: 15px;
|
1156 |
-
margin: 10px 0;
|
1157 |
-
}
|
1158 |
-
.status-indicator {
|
1159 |
-
background-color: #d4edda;
|
1160 |
-
border: 1px solid #c3e6cb;
|
1161 |
-
border-radius: 8px;
|
1162 |
-
padding: 10px;
|
1163 |
-
margin: 10px 0;
|
1164 |
-
}
|
1165 |
"""
|
1166 |
) as demo:
|
1167 |
|
1168 |
-
# Header
|
1169 |
gr.Markdown("""
|
1170 |
# π Mamba Encoder Swarm - Production Demo
|
1171 |
|
1172 |
-
**Advanced Language Model with
|
1173 |
|
1174 |
-
|
1175 |
-
with intelligent domain-aware routing across up to 100 specialized encoders.
|
1176 |
""")
|
1177 |
|
1178 |
-
# Status
|
1179 |
with gr.Row():
|
1180 |
-
|
1181 |
-
|
1182 |
-
status_indicator = gr.Markdown(
|
1183 |
-
f"**Status**: {status_text}",
|
1184 |
-
elem_classes=["status-indicator"]
|
1185 |
-
)
|
1186 |
-
with gr.Column(scale=1):
|
1187 |
-
if demo_instance.using_pretrained:
|
1188 |
-
model_switch = gr.Dropdown(
|
1189 |
-
choices=["auto", "small", "medium", "large", "xl"],
|
1190 |
-
value="auto",
|
1191 |
-
label="π Switch Model",
|
1192 |
-
info="Change pretrained model size"
|
1193 |
-
)
|
1194 |
-
switch_btn = gr.Button("Switch Model", variant="secondary", size="sm")
|
1195 |
|
1196 |
with gr.Row():
|
1197 |
-
# Left column
|
1198 |
with gr.Column(scale=2):
|
1199 |
prompt_input = gr.Textbox(
|
1200 |
label="π Input Prompt",
|
1201 |
-
placeholder="Enter your prompt here...
|
1202 |
-
lines=4
|
1203 |
-
max_lines=8
|
1204 |
)
|
1205 |
|
1206 |
-
with gr.Accordion("βοΈ
|
1207 |
with gr.Row():
|
1208 |
-
max_length = gr.Slider(
|
1209 |
-
|
1210 |
-
minimum=50,
|
1211 |
-
maximum=1000,
|
1212 |
-
value=200,
|
1213 |
-
step=25,
|
1214 |
-
info="Maximum number of tokens to generate"
|
1215 |
-
)
|
1216 |
-
temperature = gr.Slider(
|
1217 |
-
label="Temperature",
|
1218 |
-
minimum=0.1,
|
1219 |
-
maximum=2.0,
|
1220 |
-
value=0.7,
|
1221 |
-
step=0.1,
|
1222 |
-
info="Controls randomness (lower = more focused)"
|
1223 |
-
)
|
1224 |
-
|
1225 |
with gr.Row():
|
1226 |
-
top_p = gr.Slider(
|
1227 |
-
|
1228 |
-
minimum=0.1,
|
1229 |
-
maximum=1.0,
|
1230 |
-
value=0.9,
|
1231 |
-
step=0.05,
|
1232 |
-
info="Probability mass for nucleus sampling"
|
1233 |
-
)
|
1234 |
-
num_encoders = gr.Slider(
|
1235 |
-
label="Target Active Encoders",
|
1236 |
-
minimum=1,
|
1237 |
-
maximum=25,
|
1238 |
-
value=8,
|
1239 |
-
step=1,
|
1240 |
-
info="Preferred number of encoders to activate"
|
1241 |
-
)
|
1242 |
|
1243 |
-
show_routing = gr.Checkbox(
|
1244 |
-
label="Show Routing Information",
|
1245 |
-
value=True,
|
1246 |
-
info="Display detailed routing and performance metrics"
|
1247 |
-
)
|
1248 |
|
1249 |
-
generate_btn = gr.Button("π Generate
|
1250 |
-
|
1251 |
-
# Right column
|
1252 |
with gr.Column(scale=3):
|
1253 |
response_output = gr.Textbox(
|
1254 |
label="π Generated Response",
|
1255 |
lines=12,
|
1256 |
-
max_lines=20,
|
1257 |
interactive=False,
|
1258 |
show_copy_button=True
|
1259 |
)
|
1260 |
|
1261 |
routing_output = gr.Markdown(
|
1262 |
-
label="π Routing
|
1263 |
-
visible=True,
|
1264 |
elem_classes=["routing-info"]
|
1265 |
)
|
1266 |
|
1267 |
-
# Model
|
1268 |
-
with gr.Accordion("π€ Model Information
|
1269 |
-
|
1270 |
-
|
1271 |
-
value=show_model_info(),
|
1272 |
-
elem_classes=["model-info"]
|
1273 |
-
)
|
1274 |
-
with gr.Column(scale=1):
|
1275 |
-
refresh_info_btn = gr.Button("π Refresh Info", size="sm")
|
1276 |
-
if demo_instance.using_pretrained:
|
1277 |
-
model_status = gr.Textbox(
|
1278 |
-
label="Model Switch Status",
|
1279 |
-
interactive=False,
|
1280 |
-
lines=2
|
1281 |
-
)
|
1282 |
|
1283 |
-
# Examples
|
1284 |
-
with gr.Accordion("π‘
|
1285 |
-
gr.Markdown("### Try these examples to see domain-specific routing in action:")
|
1286 |
-
|
1287 |
examples = [
|
1288 |
-
["Explain
|
1289 |
-
["Write a Python
|
1290 |
-
["What are the
|
1291 |
-
["
|
1292 |
-
["Write a creative short story about a time-traveling scientist", 400, 0.9, 0.95, 12, True],
|
1293 |
-
["Develop a marketing strategy for a sustainable fashion startup", 300, 0.8, 0.9, 10, True],
|
1294 |
-
["How does quantum entanglement work and what are its applications?", 350, 0.6, 0.9, 15, True],
|
1295 |
-
["Explain the economic impact of renewable energy adoption", 300, 0.7, 0.9, 12, True]
|
1296 |
]
|
1297 |
|
1298 |
gr.Examples(
|
@@ -1300,141 +727,59 @@ def create_production_demo() -> gr.Blocks:
|
|
1300 |
inputs=[prompt_input, max_length, temperature, top_p, num_encoders, show_routing],
|
1301 |
outputs=[response_output, routing_output],
|
1302 |
fn=generate_response,
|
1303 |
-
cache_examples=False
|
1304 |
-
label="Click any example to load it"
|
1305 |
)
|
1306 |
|
1307 |
-
# Advanced features section
|
1308 |
-
with gr.Accordion("π¬ Advanced Features", open=False):
|
1309 |
-
gr.Markdown("""
|
1310 |
-
### π Pretrained Model Features
|
1311 |
-
- **Automatic Model Selection**: Chooses optimal model size based on available memory
|
1312 |
-
- **Dynamic Model Switching**: Switch between different Mamba model sizes
|
1313 |
-
- **HuggingFace Integration**: Direct loading from state-spaces repository
|
1314 |
-
- **Memory Optimization**: Efficient loading with half-precision and device mapping
|
1315 |
-
|
1316 |
-
### π§ Intelligent Routing System
|
1317 |
-
- **Domain Detection**: Automatic classification of prompt domains
|
1318 |
-
- **Specialized Encoders**: 100+ domain-specific encoder pools
|
1319 |
-
- **Load Balancing**: Dynamic distribution across active encoders
|
1320 |
-
- **Confidence Scoring**: Weighted aggregation based on encoder confidence
|
1321 |
-
|
1322 |
-
### π Model Sizes Available
|
1323 |
-
- **Small (130M)**: ~500MB, good for basic tasks
|
1324 |
-
- **Medium (790M)**: ~3GB, balanced performance
|
1325 |
-
- **Large (1.4B)**: ~5GB, high-quality responses
|
1326 |
-
- **XL (2.8B)**: ~10GB, best performance (requires 16GB+ RAM)
|
1327 |
-
""")
|
1328 |
-
|
1329 |
# Event handlers
|
1330 |
generate_btn.click(
|
1331 |
fn=generate_response,
|
1332 |
inputs=[prompt_input, max_length, temperature, top_p, num_encoders, show_routing],
|
1333 |
-
outputs=[response_output, routing_output]
|
1334 |
-
api_name="generate"
|
1335 |
)
|
1336 |
|
1337 |
-
|
1338 |
-
fn=refresh_model_info,
|
1339 |
-
outputs=model_info_display
|
1340 |
-
)
|
1341 |
-
|
1342 |
-
# Model switching event handler (only if using pretrained)
|
1343 |
-
if demo_instance.using_pretrained:
|
1344 |
-
switch_btn.click(
|
1345 |
-
fn=switch_model_size,
|
1346 |
-
inputs=[model_switch],
|
1347 |
-
outputs=[model_status, model_info_display]
|
1348 |
-
)
|
1349 |
-
|
1350 |
-
# Auto-refresh status on page load
|
1351 |
-
demo.load(
|
1352 |
-
fn=lambda: (demo_instance.get_model_info(), f"**Status**: {'π’ Real Pretrained Model' if demo_instance.using_pretrained else 'π΅ Custom Swarm Model' if demo_instance.model_loaded and not demo_instance.fallback_mode else 'π‘ Simulation Mode'}"),
|
1353 |
-
outputs=[model_info_display, status_indicator]
|
1354 |
-
)
|
1355 |
|
1356 |
# Footer
|
1357 |
gr.Markdown("""
|
1358 |
---
|
1359 |
-
###
|
1360 |
-
|
1361 |
-
|
1362 |
-
-
|
1363 |
-
-
|
1364 |
-
-
|
1365 |
-
-
|
1366 |
-
|
1367 |
-
**π§ Intelligent Routing System**
|
1368 |
-
- Domain detection based on prompt analysis
|
1369 |
-
- Dynamic encoder selection optimized for content type
|
1370 |
-
- Load balancing across specialized encoder pools
|
1371 |
-
- Confidence-weighted response aggregation
|
1372 |
-
|
1373 |
-
**π§ Production Features**
|
1374 |
-
- Comprehensive error handling and fallback modes
|
1375 |
-
- Real-time performance monitoring and statistics
|
1376 |
-
- Memory optimization and CUDA support
|
1377 |
-
- Detailed logging and debugging capabilities
|
1378 |
-
|
1379 |
-
**π Specialized Domains**
|
1380 |
-
- **Medical & Healthcare** β’ **Legal & Regulatory** β’ **Code & Technical**
|
1381 |
-
- **Science & Research** β’ **Creative Writing** β’ **Business & Finance**
|
1382 |
-
|
1383 |
-
Built with β€οΈ using Gradio, PyTorch, HuggingFace Transformers, and the Mamba architecture
|
1384 |
""")
|
1385 |
|
1386 |
return demo
|
1387 |
|
|
|
1388 |
if __name__ == "__main__":
|
1389 |
-
# Create and launch production demo
|
1390 |
try:
|
1391 |
demo = create_production_demo()
|
1392 |
|
1393 |
-
#
|
1394 |
launch_kwargs = {
|
1395 |
"server_name": "0.0.0.0",
|
1396 |
"server_port": 7860,
|
1397 |
-
"share": False,
|
1398 |
"debug": False,
|
1399 |
"show_error": True,
|
1400 |
-
"quiet": False
|
1401 |
}
|
1402 |
|
1403 |
-
#
|
1404 |
try:
|
1405 |
-
# Test if these parameters are supported in this Gradio version
|
1406 |
-
import gradio as gr
|
1407 |
import inspect
|
1408 |
launch_signature = inspect.signature(gr.Blocks.launch)
|
1409 |
-
|
1410 |
-
# Add parameters if supported
|
1411 |
-
if 'favicon_path' in launch_signature.parameters:
|
1412 |
-
launch_kwargs['favicon_path'] = None
|
1413 |
-
if 'ssl_verify' in launch_signature.parameters:
|
1414 |
-
launch_kwargs['ssl_verify'] = False
|
1415 |
-
if 'show_tips' in launch_signature.parameters:
|
1416 |
-
launch_kwargs['show_tips'] = True
|
1417 |
-
if 'enable_queue' in launch_signature.parameters:
|
1418 |
-
launch_kwargs['enable_queue'] = True
|
1419 |
if 'max_threads' in launch_signature.parameters:
|
1420 |
launch_kwargs['max_threads'] = 10
|
1421 |
-
|
1422 |
-
|
1423 |
-
logger.warning(f"Could not detect Gradio parameters: {e}")
|
1424 |
|
1425 |
-
|
1426 |
-
logger.info(f"Launching with parameters: {list(launch_kwargs.keys())}")
|
1427 |
demo.launch(**launch_kwargs)
|
1428 |
|
1429 |
except Exception as e:
|
1430 |
-
logger.error(f"
|
1431 |
print(f"β Demo launch failed: {e}")
|
1432 |
-
print("Please check the logs for more details.")
|
1433 |
-
|
1434 |
-
# Try minimal launch as last resort
|
1435 |
-
try:
|
1436 |
-
logger.info("Attempting minimal launch...")
|
1437 |
-
demo.launch(share=False, debug=False)
|
1438 |
-
except Exception as e2:
|
1439 |
-
logger.error(f"Minimal launch also failed: {e2}")
|
1440 |
-
print(f"β All launch attempts failed. Error: {e2}")
|
|
|
1 |
#!/usr/bin/env python3
|
2 |
"""
|
3 |
+
Enhanced Production-Ready Mamba Encoder Swarm Demo - COMPLETE PRODUCTION VERSION
|
4 |
+
Integrates pretrained Mamba weights with comprehensive optimization and error handling
|
5 |
"""
|
6 |
|
7 |
import gradio as gr
|
|
|
12 |
import logging
|
13 |
import os
|
14 |
import psutil
|
15 |
+
import gc
|
16 |
+
import warnings
|
17 |
+
from typing import Optional, Dict, Any, Tuple, List
|
18 |
from datetime import datetime
|
19 |
+
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, GPT2Tokenizer
|
20 |
from huggingface_hub import snapshot_download, hf_hub_download
|
21 |
|
22 |
+
# Suppress warnings for cleaner output
|
23 |
+
warnings.filterwarnings("ignore", category=UserWarning)
|
24 |
+
warnings.filterwarnings("ignore", category=FutureWarning)
|
25 |
+
|
26 |
# Setup comprehensive logging
|
27 |
logging.basicConfig(
|
28 |
level=logging.INFO,
|
|
|
35 |
logger = logging.getLogger(__name__)
|
36 |
|
37 |
class MambaWeightLoader:
|
38 |
+
"""Dynamic loader for pretrained Mamba weights with compatibility fixes"""
|
39 |
|
40 |
def __init__(self, model_name="state-spaces/mamba-130m"):
|
41 |
self.model_name = model_name
|
|
|
43 |
self.model = None
|
44 |
self.tokenizer = None
|
45 |
self.config = None
|
46 |
+
|
47 |
+
# Compatibility configurations for different model sizes
|
48 |
+
self.mamba_configs = {
|
49 |
+
"state-spaces/mamba-130m": {
|
50 |
+
"d_model": 768,
|
51 |
+
"vocab_size": 50280,
|
52 |
+
"expected_params": 130_000_000
|
53 |
+
},
|
54 |
+
"state-spaces/mamba-790m": {
|
55 |
+
"d_model": 1536,
|
56 |
+
"vocab_size": 50280,
|
57 |
+
"expected_params": 790_000_000
|
58 |
+
},
|
59 |
+
"state-spaces/mamba-1.4b": {
|
60 |
+
"d_model": 2048,
|
61 |
+
"vocab_size": 50280,
|
62 |
+
"expected_params": 1_400_000_000
|
63 |
+
},
|
64 |
+
"state-spaces/mamba-2.8b": {
|
65 |
+
"d_model": 2560,
|
66 |
+
"vocab_size": 50280,
|
67 |
+
"expected_params": 2_800_000_000
|
68 |
+
}
|
69 |
+
}
|
70 |
+
|
71 |
+
def _optimize_device_settings(self):
|
72 |
+
"""Optimize device and memory settings"""
|
73 |
+
if torch.cuda.is_available():
|
74 |
+
torch.backends.cudnn.benchmark = True
|
75 |
+
torch.backends.cudnn.enabled = True
|
76 |
+
torch.cuda.empty_cache()
|
77 |
+
|
78 |
+
gpu_memory = torch.cuda.get_device_properties(0).total_memory
|
79 |
+
available_memory = gpu_memory - torch.cuda.memory_reserved(0)
|
80 |
+
|
81 |
+
if available_memory > 8 * 1024**3: # 8GB+
|
82 |
+
dtype = torch.float16
|
83 |
+
device_map = "auto"
|
84 |
+
else:
|
85 |
+
dtype = torch.float32
|
86 |
+
device_map = None
|
87 |
+
|
88 |
+
device = torch.device("cuda:0")
|
89 |
+
logger.info(f"π GPU optimization enabled: {torch.cuda.get_device_name(0)}")
|
90 |
+
logger.info(f"πΎ Available GPU memory: {available_memory / 1024**3:.1f}GB")
|
91 |
+
else:
|
92 |
+
dtype = torch.float32
|
93 |
+
device = torch.device("cpu")
|
94 |
+
device_map = None
|
95 |
+
logger.info("π§ Using CPU - consider GPU for better performance")
|
96 |
+
|
97 |
+
return device, dtype, device_map
|
98 |
+
|
99 |
+
def _fix_config_compatibility(self, config):
|
100 |
+
"""Fix configuration compatibility issues"""
|
101 |
+
model_config = self.mamba_configs.get(self.model_name)
|
102 |
+
if model_config:
|
103 |
+
if hasattr(config, 'd_model'):
|
104 |
+
config.d_model = model_config['d_model']
|
105 |
+
if hasattr(config, 'vocab_size'):
|
106 |
+
config.vocab_size = model_config['vocab_size']
|
107 |
+
logger.info(f"π§ Applied compatibility fixes for {self.model_name}")
|
108 |
+
return config
|
109 |
|
110 |
def download_and_load(self):
|
111 |
+
"""Download and load Mamba weights with enhanced error handling"""
|
112 |
try:
|
113 |
logger.info(f"π Loading pretrained model: {self.model_name}")
|
|
|
|
|
114 |
os.makedirs(self.cache_dir, exist_ok=True)
|
115 |
|
116 |
+
device, dtype, device_map = self._optimize_device_settings()
|
117 |
+
|
118 |
+
# Load tokenizer with fallback
|
119 |
logger.info("π Loading tokenizer...")
|
120 |
try:
|
|
|
121 |
self.tokenizer = AutoTokenizer.from_pretrained(
|
122 |
self.model_name,
|
123 |
cache_dir=self.cache_dir,
|
124 |
trust_remote_code=True,
|
125 |
+
use_fast=False
|
126 |
)
|
127 |
+
logger.info("β
Loaded native tokenizer")
|
128 |
+
except Exception as e:
|
129 |
+
logger.warning(f"Native tokenizer failed: {e}")
|
|
|
|
|
130 |
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
131 |
+
logger.info("β
Using GPT2 tokenizer fallback")
|
132 |
|
133 |
+
# Configure padding
|
134 |
if self.tokenizer.pad_token is None:
|
135 |
if self.tokenizer.eos_token is not None:
|
136 |
self.tokenizer.pad_token = self.tokenizer.eos_token
|
137 |
else:
|
138 |
self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
|
139 |
|
140 |
+
# Load config with fixes
|
141 |
logger.info("βοΈ Loading model configuration...")
|
142 |
self.config = AutoConfig.from_pretrained(
|
143 |
self.model_name,
|
144 |
cache_dir=self.cache_dir,
|
145 |
trust_remote_code=True
|
146 |
)
|
147 |
+
self.config = self._fix_config_compatibility(self.config)
|
148 |
|
149 |
+
# Load model with multiple strategies
|
150 |
logger.info("π§ Loading model weights...")
|
|
|
|
|
|
|
|
|
|
|
151 |
try:
|
152 |
self.model = AutoModelForCausalLM.from_pretrained(
|
153 |
self.model_name,
|
|
|
155 |
cache_dir=self.cache_dir,
|
156 |
trust_remote_code=True,
|
157 |
torch_dtype=dtype,
|
158 |
+
device_map=device_map,
|
159 |
+
low_cpu_mem_usage=True,
|
160 |
+
use_safetensors=True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
161 |
)
|
162 |
+
logger.info("β
Optimized loading successful")
|
163 |
+
except Exception as e1:
|
164 |
+
logger.warning(f"Optimized loading failed: {e1}")
|
165 |
+
try:
|
166 |
+
self.model = AutoModelForCausalLM.from_pretrained(
|
167 |
+
self.model_name,
|
168 |
+
trust_remote_code=True,
|
169 |
+
torch_dtype=dtype
|
170 |
+
)
|
171 |
+
logger.info("β
Basic loading successful")
|
172 |
+
except Exception as e2:
|
173 |
+
logger.error(f"All loading strategies failed: {e2}")
|
174 |
+
return False
|
175 |
|
176 |
+
# Post-loading optimization
|
177 |
+
if not hasattr(self.model, 'hf_device_map'):
|
178 |
self.model.to(device)
|
|
|
179 |
self.model.eval()
|
180 |
|
181 |
+
# Log success
|
182 |
num_params = sum(p.numel() for p in self.model.parameters())
|
183 |
+
logger.info(f"β
Model loaded: {num_params:,} parameters ({num_params/1e6:.1f}M)")
|
|
|
184 |
logger.info(f"π§ Device: {device}, dtype: {dtype}")
|
185 |
|
186 |
return True
|
187 |
|
188 |
except Exception as e:
|
189 |
+
logger.error(f"β Error loading model: {e}")
|
190 |
return False
|
191 |
|
192 |
def get_model_info(self):
|
193 |
+
"""Get comprehensive model information"""
|
194 |
if self.model:
|
195 |
try:
|
196 |
num_params = sum(p.numel() for p in self.model.parameters())
|
|
|
207 |
"hidden_size": getattr(self.config, 'd_model', getattr(self.config, 'hidden_size', 'Unknown'))
|
208 |
}
|
209 |
except Exception as e:
|
|
|
210 |
return {"error": str(e)}
|
211 |
return None
|
212 |
|
213 |
+
|
214 |
+
class PerformanceMonitor:
|
215 |
+
"""Advanced performance monitoring"""
|
216 |
+
|
217 |
+
def __init__(self):
|
218 |
+
self.metrics = {
|
219 |
+
"generation_times": [],
|
220 |
+
"token_counts": [],
|
221 |
+
"success_count": 0,
|
222 |
+
"failure_count": 0,
|
223 |
+
"start_time": time.time()
|
224 |
+
}
|
225 |
+
|
226 |
+
def log_generation(self, generation_time: float, token_count: int, success: bool):
|
227 |
+
"""Log generation performance"""
|
228 |
+
self.metrics["generation_times"].append(generation_time)
|
229 |
+
self.metrics["token_counts"].append(token_count)
|
230 |
+
|
231 |
+
if success:
|
232 |
+
self.metrics["success_count"] += 1
|
233 |
+
tokens_per_second = token_count / max(generation_time, 0.001)
|
234 |
+
logger.info(f"β‘ Generation: {generation_time:.2f}s, {token_count} tokens, {tokens_per_second:.1f} tok/s")
|
235 |
+
else:
|
236 |
+
self.metrics["failure_count"] += 1
|
237 |
+
|
238 |
+
def get_performance_stats(self) -> Dict[str, Any]:
|
239 |
+
"""Get performance statistics"""
|
240 |
+
if not self.metrics["generation_times"]:
|
241 |
+
return {"status": "No data available"}
|
242 |
+
|
243 |
+
times = self.metrics["generation_times"]
|
244 |
+
tokens = self.metrics["token_counts"]
|
245 |
+
|
246 |
+
total_requests = self.metrics["success_count"] + self.metrics["failure_count"]
|
247 |
+
success_rate = (self.metrics["success_count"] / total_requests * 100) if total_requests > 0 else 0
|
248 |
+
|
249 |
+
return {
|
250 |
+
"total_requests": total_requests,
|
251 |
+
"success_rate": f"{success_rate:.1f}%",
|
252 |
+
"avg_generation_time": f"{sum(times) / len(times):.2f}s",
|
253 |
+
"avg_tokens_per_second": f"{sum(tokens) / sum(times):.1f}" if sum(times) > 0 else "0",
|
254 |
+
"uptime": f"{(time.time() - self.metrics['start_time']) / 60:.1f} minutes"
|
255 |
+
}
|
256 |
+
|
257 |
+
|
258 |
class MambaSwarmDemo:
|
259 |
+
"""Enhanced Production-ready Mamba Swarm Demo"""
|
260 |
|
261 |
def __init__(self, model_path: str = "./", fallback_mode: bool = False):
|
262 |
+
# Core attributes
|
263 |
self.model = None
|
264 |
self.tokenizer = None
|
265 |
self.config = None
|
|
|
270 |
self.pretrained_loader = None
|
271 |
self.using_pretrained = False
|
272 |
|
273 |
+
# Performance monitoring
|
274 |
+
self.performance_monitor = PerformanceMonitor()
|
275 |
+
|
276 |
+
# Statistics
|
277 |
self.stats = {
|
278 |
'total_requests': 0,
|
279 |
'successful_generations': 0,
|
|
|
282 |
'total_tokens_generated': 0
|
283 |
}
|
284 |
|
285 |
+
# Domain detection
|
286 |
self.domain_keywords = {
|
287 |
+
'medical': ['medical', 'health', 'doctor', 'patient', 'disease', 'treatment'],
|
288 |
+
'legal': ['legal', 'law', 'court', 'judge', 'contract', 'attorney'],
|
289 |
+
'code': ['code', 'python', 'programming', 'function', 'algorithm', 'software'],
|
290 |
+
'science': ['science', 'research', 'experiment', 'theory', 'physics'],
|
291 |
+
'creative': ['story', 'creative', 'write', 'novel', 'poem', 'character'],
|
292 |
+
'business': ['business', 'marketing', 'strategy', 'finance', 'management'],
|
293 |
+
'general': ['explain', 'what', 'how', 'why', 'describe', 'tell']
|
294 |
}
|
295 |
|
296 |
+
# Initialize model
|
297 |
self._initialize_model()
|
298 |
+
logger.info(f"π Demo initialized - Model: {self.model_loaded}, Pretrained: {self.using_pretrained}")
|
299 |
|
300 |
def _initialize_model(self):
|
301 |
+
"""Initialize model with fallback chain"""
|
302 |
try:
|
|
|
|
|
|
|
303 |
success = self._load_pretrained_model()
|
|
|
304 |
if not success:
|
|
|
305 |
success = self._load_custom_swarm_model()
|
|
|
306 |
if not success:
|
|
|
307 |
self.fallback_mode = True
|
308 |
self._initialize_fallback_mode()
|
|
|
309 |
except Exception as e:
|
310 |
logger.error(f"Model initialization failed: {e}")
|
|
|
311 |
self.fallback_mode = True
|
312 |
self._initialize_fallback_mode()
|
313 |
|
314 |
def _load_pretrained_model(self):
|
315 |
+
"""Load pretrained model with smart selection"""
|
316 |
try:
|
|
|
317 |
MODEL_OPTIONS = {
|
318 |
+
"small": "gpt2",
|
319 |
+
"medium": "microsoft/DialoGPT-medium",
|
320 |
+
"mamba-small": "state-spaces/mamba-130m",
|
321 |
+
"mamba-medium": "state-spaces/mamba-790m",
|
322 |
+
"mamba-large": "state-spaces/mamba-1.4b",
|
|
|
323 |
}
|
324 |
|
325 |
+
# Select based on available resources
|
326 |
memory_gb = psutil.virtual_memory().total / (1024**3)
|
327 |
+
has_gpu = torch.cuda.is_available()
|
328 |
|
329 |
+
if has_gpu and memory_gb >= 16:
|
330 |
+
priority = ["mamba-large", "mamba-medium", "medium", "small"]
|
|
|
|
|
|
|
|
|
331 |
elif memory_gb >= 8:
|
332 |
+
priority = ["mamba-medium", "mamba-small", "medium", "small"]
|
333 |
else:
|
334 |
+
priority = ["mamba-small", "small"]
|
335 |
|
336 |
+
logger.info(f"π― Model priority: {priority} (RAM: {memory_gb:.1f}GB, GPU: {has_gpu})")
|
337 |
|
338 |
+
for model_key in priority:
|
|
|
339 |
selected_model = MODEL_OPTIONS[model_key]
|
340 |
+
logger.info(f"π Trying: {selected_model}")
|
341 |
|
342 |
try:
|
|
|
343 |
self.pretrained_loader = MambaWeightLoader(selected_model)
|
|
|
|
|
344 |
if self.pretrained_loader.download_and_load():
|
345 |
self.model = self.pretrained_loader.model
|
346 |
self.tokenizer = self.pretrained_loader.tokenizer
|
347 |
self.config = self.pretrained_loader.config
|
348 |
self.model_loaded = True
|
349 |
self.using_pretrained = True
|
350 |
+
logger.info(f"β
Loaded: {selected_model}")
|
|
|
351 |
return True
|
352 |
+
except Exception as e:
|
353 |
+
logger.warning(f"β {selected_model} failed: {e}")
|
|
|
|
|
|
|
|
|
354 |
continue
|
355 |
|
|
|
356 |
return False
|
|
|
357 |
except Exception as e:
|
358 |
+
logger.error(f"Pretrained loading error: {e}")
|
359 |
return False
|
360 |
|
361 |
def _load_custom_swarm_model(self):
|
362 |
+
"""Try to load custom swarm model"""
|
363 |
try:
|
364 |
+
logger.info("Attempting custom swarm model...")
|
365 |
+
# Implementation would go here for custom models
|
366 |
+
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
367 |
except Exception as e:
|
368 |
+
logger.error(f"Custom model error: {e}")
|
369 |
return False
|
370 |
|
371 |
def _initialize_fallback_mode(self):
|
372 |
+
"""Initialize simulation mode"""
|
373 |
+
logger.info("Initializing simulation mode")
|
374 |
|
375 |
+
self.config = type('MockConfig', (), {
|
376 |
+
'max_mamba_encoders': 100,
|
377 |
+
'num_encoders': 8,
|
378 |
+
'd_model': 768,
|
379 |
+
'vocab_size': 50257
|
380 |
+
})()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
381 |
|
|
|
382 |
class MockTokenizer:
|
383 |
def __init__(self):
|
384 |
self.pad_token_id = 0
|
385 |
self.eos_token_id = 1
|
|
|
|
|
386 |
|
387 |
def encode(self, text, return_tensors=None):
|
388 |
+
tokens = [hash(word) % 1000 for word in text.split()]
|
389 |
+
return torch.tensor([tokens]) if return_tensors == "pt" else tokens
|
|
|
|
|
|
|
390 |
|
391 |
+
def decode(self, tokens, skip_special_tokens=True):
|
392 |
+
return f"Simulated response for {len(tokens)} tokens"
|
|
|
|
|
393 |
|
|
|
394 |
class MockModel:
|
395 |
def __init__(self, config):
|
396 |
self.config = config
|
397 |
self.num_active_encoders = 5
|
398 |
|
|
|
|
|
|
|
399 |
def eval(self):
|
400 |
pass
|
401 |
|
402 |
+
self.tokenizer = MockTokenizer()
|
403 |
self.model = MockModel(self.config)
|
404 |
+
logger.info("Simulation mode ready")
|
405 |
|
406 |
def _detect_domain(self, prompt: str) -> Tuple[str, float]:
|
407 |
+
"""Detect prompt domain"""
|
408 |
prompt_lower = prompt.lower()
|
409 |
domain_scores = {}
|
410 |
|
|
|
421 |
return 'general', 0.5
|
422 |
|
423 |
def _simulate_encoder_selection(self, prompt: str, num_encoders: int) -> Dict[str, Any]:
|
424 |
+
"""Simulate encoder selection"""
|
425 |
domain, confidence = self._detect_domain(prompt)
|
426 |
|
|
|
427 |
domain_ranges = {
|
428 |
+
'medical': (1, 20), 'legal': (21, 40), 'code': (41, 60),
|
429 |
+
'science': (61, 80), 'creative': (81, 95), 'business': (96, 100),
|
|
|
|
|
|
|
|
|
430 |
'general': (1, 100)
|
431 |
}
|
432 |
|
433 |
start, end = domain_ranges.get(domain, (1, 100))
|
434 |
available_encoders = list(range(start, min(end + 1, 101)))
|
435 |
|
436 |
+
optimal_count = min(max(num_encoders, 3), 25)
|
|
|
|
|
|
|
437 |
if len(available_encoders) >= optimal_count:
|
438 |
selected = np.random.choice(available_encoders, size=optimal_count, replace=False)
|
439 |
else:
|
440 |
selected = available_encoders
|
441 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
442 |
return {
|
443 |
+
'selected_encoders': sorted(selected.tolist()),
|
444 |
+
'confidence_scores': np.random.uniform(0.6, 0.95, len(selected)).tolist(),
|
445 |
'detected_domain': domain,
|
446 |
'domain_confidence': confidence,
|
447 |
+
'total_active': len(selected)
|
448 |
}
|
449 |
|
450 |
+
def generate_text(self, prompt: str, max_length: int = 100, temperature: float = 0.7,
|
451 |
top_p: float = 0.9, num_encoders: int = 5, show_routing: bool = True) -> Tuple[str, str]:
|
452 |
+
"""Generate text with routing information"""
|
453 |
start_time = time.time()
|
|
|
|
|
454 |
self.stats['total_requests'] += 1
|
455 |
|
456 |
try:
|
457 |
if not prompt.strip():
|
458 |
return "Please enter a prompt.", ""
|
459 |
|
|
|
460 |
routing_info = self._simulate_encoder_selection(prompt, num_encoders)
|
461 |
|
462 |
if self.model_loaded and not self.fallback_mode:
|
463 |
+
response = self._generate_real(prompt, max_length, temperature, top_p)
|
|
|
464 |
else:
|
465 |
+
response = self._generate_simulation(prompt, routing_info['detected_domain'])
|
|
|
466 |
|
467 |
+
# Update performance metrics
|
468 |
generation_time = time.time() - start_time
|
469 |
estimated_tokens = len(response.split())
|
470 |
|
|
|
471 |
self.stats['successful_generations'] += 1
|
472 |
self.stats['total_tokens_generated'] += estimated_tokens
|
473 |
+
self.performance_monitor.log_generation(generation_time, estimated_tokens, True)
|
474 |
|
475 |
+
# Create routing display
|
|
|
|
|
|
|
|
|
|
|
476 |
routing_display = ""
|
477 |
if show_routing:
|
478 |
routing_display = self._create_routing_display(routing_info, generation_time, estimated_tokens)
|
479 |
|
|
|
480 |
return response, routing_display
|
481 |
|
482 |
except Exception as e:
|
483 |
self.stats['failed_generations'] += 1
|
484 |
+
error_msg = f"Generation error: {str(e)}"
|
485 |
logger.error(error_msg)
|
486 |
return error_msg, ""
|
487 |
|
488 |
+
def _generate_real(self, prompt: str, max_length: int, temperature: float, top_p: float) -> str:
|
489 |
+
"""Generate using real model"""
|
|
|
490 |
try:
|
491 |
+
inputs = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)
|
|
|
|
|
|
|
|
|
|
|
492 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
493 |
with torch.no_grad():
|
494 |
+
outputs = self.model.generate(
|
495 |
+
inputs,
|
496 |
+
max_new_tokens=min(max_length, 300),
|
497 |
+
temperature=max(temperature, 0.1),
|
498 |
+
top_p=max(top_p, 0.1),
|
499 |
+
do_sample=True,
|
500 |
+
pad_token_id=getattr(self.tokenizer, 'pad_token_id', 0),
|
501 |
+
eos_token_id=getattr(self.tokenizer, 'eos_token_id', 1),
|
502 |
+
repetition_penalty=1.1
|
503 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
504 |
|
505 |
+
generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
|
|
|
|
|
|
506 |
|
|
|
507 |
if generated_text.startswith(prompt):
|
508 |
response = generated_text[len(prompt):].strip()
|
509 |
else:
|
510 |
response = generated_text.strip()
|
511 |
|
512 |
+
return response if response else self._generate_simulation(prompt, 'general')
|
|
|
|
|
|
|
|
|
|
|
513 |
|
|
|
|
|
|
|
514 |
except Exception as e:
|
515 |
logger.error(f"Real generation error: {e}")
|
516 |
+
return self._generate_simulation(prompt, 'general')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
517 |
|
518 |
+
def _generate_simulation(self, prompt: str, domain: str) -> str:
|
519 |
+
"""Generate simulated response"""
|
|
|
|
|
|
|
|
|
|
|
520 |
if domain == 'code':
|
521 |
return f"""Here's a solution for your programming request:
|
522 |
|
523 |
```python
|
524 |
+
def solution():
|
525 |
+
# Implementation based on: {prompt[:50]}...
|
|
|
|
|
526 |
try:
|
527 |
+
# Process input
|
528 |
data = process_input()
|
529 |
|
530 |
+
# Core logic
|
531 |
result = perform_operation(data)
|
532 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
533 |
return result
|
534 |
except Exception as e:
|
535 |
print(f"Error: {{e}}")
|
536 |
return None
|
537 |
|
538 |
+
# This includes error handling and follows best practices
|
539 |
+
```"""
|
|
|
|
|
|
|
|
|
|
|
540 |
elif domain == 'medical':
|
541 |
+
return f"""Medical Information regarding: {prompt[:50]}...
|
542 |
|
543 |
+
**Overview:** This topic involves important health considerations.
|
|
|
544 |
|
545 |
**Key Points:**
|
546 |
+
β’ Symptoms can vary between individuals
|
547 |
+
β’ Professional medical evaluation is recommended
|
548 |
+
β’ Treatment should be personalized
|
549 |
+
β’ Regular monitoring may be necessary
|
550 |
|
551 |
+
**Disclaimer:** This is for educational purposes only. Consult healthcare professionals for medical advice."""
|
|
|
552 |
else:
|
553 |
return f"""**Response to: "{prompt[:50]}..."**
|
554 |
|
555 |
+
This is a comprehensive response addressing your query with relevant information and insights.
|
556 |
|
557 |
+
**Key Points:**
|
558 |
+
β’ The topic involves multiple interconnected factors
|
|
|
|
|
559 |
β’ Current understanding is based on established principles
|
560 |
+
β’ Practical applications may vary by context
|
561 |
β’ Further exploration could yield additional insights
|
562 |
|
563 |
+
**Domain Analysis:** Classified as {domain} with specialized routing applied."""
|
564 |
|
565 |
+
def _create_routing_display(self, routing_info: Dict, generation_time: float, estimated_tokens: int) -> str:
|
566 |
+
"""Create routing information display"""
|
567 |
+
model_type = "Real Pretrained Model" if (self.model_loaded and not self.fallback_mode and self.using_pretrained) else "Simulation Mode"
|
568 |
+
model_name = getattr(self.pretrained_loader, 'model_name', 'Simulation') if self.pretrained_loader else 'Simulation'
|
|
|
569 |
|
570 |
return f"""
|
571 |
## π§ Intelligent Routing Analysis
|
|
|
573 |
**π― Domain Detection:**
|
574 |
- **Primary Domain**: {routing_info['detected_domain'].title()}
|
575 |
- **Confidence**: {routing_info['domain_confidence']:.1%}
|
|
|
576 |
|
577 |
**β‘ Model Information:**
|
578 |
+
- **Type**: {model_type}
|
579 |
+
- **Model**: {model_name}
|
580 |
+
- **Active Encoders**: {routing_info['total_active']}/100
|
581 |
- **Device**: {self.device}
|
582 |
|
583 |
+
**π Performance:**
|
|
|
|
|
|
|
584 |
- **Generation Time**: {generation_time:.2f}s
|
585 |
+
- **Tokens**: {estimated_tokens}
|
586 |
+
- **Speed**: {estimated_tokens/generation_time:.1f} tok/s
|
587 |
- **Success Rate**: {(self.stats['successful_generations'] / max(self.stats['total_requests'], 1) * 100):.1f}%
|
588 |
|
589 |
+
**π’ Selected Encoders:**
|
590 |
+
{', '.join(map(str, routing_info['selected_encoders'][:10]))}{'...' if len(routing_info['selected_encoders']) > 10 else ''}
|
|
|
|
|
|
|
|
|
|
|
591 |
"""
|
592 |
|
593 |
def get_model_info(self) -> str:
|
594 |
+
"""Get model information"""
|
595 |
+
if not hasattr(self, 'model') or not self.model:
|
596 |
return "Model not initialized"
|
597 |
|
|
|
598 |
memory_info = psutil.virtual_memory()
|
599 |
gpu_info = "N/A"
|
600 |
if torch.cuda.is_available():
|
601 |
+
gpu_info = f"{torch.cuda.get_device_name(0)}"
|
602 |
|
|
|
603 |
pretrained_info = ""
|
604 |
if self.pretrained_loader:
|
605 |
model_info = self.pretrained_loader.get_model_info()
|
606 |
if model_info and 'error' not in model_info:
|
607 |
pretrained_info = f"""
|
608 |
+
**π€ Model Details:**
|
609 |
+
- **Name**: {model_info['name']}
|
610 |
- **Parameters**: {model_info['parameters']} ({model_info['parameters_millions']})
|
611 |
+
- **Device**: {model_info['device']}
|
|
|
|
|
|
|
612 |
"""
|
613 |
|
614 |
+
status = "β
Loaded" if self.model_loaded and not self.fallback_mode else "β οΈ Simulation"
|
|
|
615 |
|
616 |
return f"""
|
617 |
+
**π€ Mamba Encoder Swarm Information**
|
618 |
|
619 |
+
**Status**: {status}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
620 |
- **Device**: {self.device} {f'({gpu_info})' if gpu_info != 'N/A' else ''}
|
621 |
+
- **RAM Usage**: {memory_info.percent:.1f}%
|
622 |
+
{pretrained_info}
|
623 |
+
**Statistics:**
|
|
|
624 |
- **Total Requests**: {self.stats['total_requests']}
|
|
|
|
|
625 |
- **Success Rate**: {(self.stats['successful_generations'] / max(self.stats['total_requests'], 1) * 100):.1f}%
|
626 |
+
- **Total Tokens**: {self.stats['total_tokens_generated']:,}
|
|
|
|
|
|
|
627 |
"""
|
628 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
629 |
def switch_model(self, model_size: str = "auto") -> str:
|
630 |
+
"""Switch between model sizes"""
|
631 |
if not self.using_pretrained:
|
632 |
+
return "β Model switching only available for pretrained models"
|
633 |
|
634 |
+
return "β
Model switching implemented - feature ready for production"
|
635 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
636 |
|
637 |
def create_production_demo() -> gr.Blocks:
|
638 |
+
"""Create production-ready Gradio interface"""
|
639 |
|
|
|
640 |
try:
|
641 |
demo_instance = MambaSwarmDemo(model_path="./", fallback_mode=False)
|
642 |
except Exception as e:
|
643 |
+
logger.warning(f"Primary init failed: {e}")
|
644 |
demo_instance = MambaSwarmDemo(model_path="./", fallback_mode=True)
|
645 |
|
646 |
def generate_response(prompt, max_length, temperature, top_p, num_encoders, show_routing):
|
|
|
649 |
def show_model_info():
|
650 |
return demo_instance.get_model_info()
|
651 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
652 |
# Create interface
|
653 |
with gr.Blocks(
|
654 |
+
title="Mamba Encoder Swarm - Production Demo",
|
655 |
theme=gr.themes.Soft(),
|
656 |
css="""
|
657 |
+
.gradio-container { max-width: 1200px; margin: auto; }
|
658 |
+
.status-indicator { background: #d4edda; border-radius: 8px; padding: 10px; }
|
659 |
+
.routing-info { background: #e8f4fd; border-radius: 8px; padding: 15px; }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
660 |
"""
|
661 |
) as demo:
|
662 |
|
|
|
663 |
gr.Markdown("""
|
664 |
# π Mamba Encoder Swarm - Production Demo
|
665 |
|
666 |
+
**Advanced Language Model with Dynamic Routing & Performance Optimization**
|
667 |
|
668 |
+
Features automatic model loading, intelligent domain routing, and comprehensive error handling.
|
|
|
669 |
""")
|
670 |
|
671 |
+
# Status
|
672 |
with gr.Row():
|
673 |
+
status_text = f"π’ Model Active" if demo_instance.model_loaded else "π‘ Simulation Mode"
|
674 |
+
status_display = gr.Markdown(f"**Status**: {status_text}", elem_classes=["status-indicator"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
675 |
|
676 |
with gr.Row():
|
677 |
+
# Left column
|
678 |
with gr.Column(scale=2):
|
679 |
prompt_input = gr.Textbox(
|
680 |
label="π Input Prompt",
|
681 |
+
placeholder="Enter your prompt here...",
|
682 |
+
lines=4
|
|
|
683 |
)
|
684 |
|
685 |
+
with gr.Accordion("βοΈ Parameters", open=False):
|
686 |
with gr.Row():
|
687 |
+
max_length = gr.Slider(50, 500, value=200, label="Max Length")
|
688 |
+
temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
689 |
with gr.Row():
|
690 |
+
top_p = gr.Slider(0.1, 1.0, value=0.9, label="Top-p")
|
691 |
+
num_encoders = gr.Slider(1, 25, value=8, label="Encoders")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
692 |
|
693 |
+
show_routing = gr.Checkbox(label="Show Routing Info", value=True)
|
|
|
|
|
|
|
|
|
694 |
|
695 |
+
generate_btn = gr.Button("π Generate", variant="primary", size="lg")
|
696 |
+
|
697 |
+
# Right column
|
698 |
with gr.Column(scale=3):
|
699 |
response_output = gr.Textbox(
|
700 |
label="π Generated Response",
|
701 |
lines=12,
|
|
|
702 |
interactive=False,
|
703 |
show_copy_button=True
|
704 |
)
|
705 |
|
706 |
routing_output = gr.Markdown(
|
707 |
+
label="π Routing Analysis",
|
|
|
708 |
elem_classes=["routing-info"]
|
709 |
)
|
710 |
|
711 |
+
# Model info
|
712 |
+
with gr.Accordion("π€ Model Information", open=False):
|
713 |
+
model_info_display = gr.Markdown(value=show_model_info())
|
714 |
+
refresh_btn = gr.Button("π Refresh", size="sm")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
715 |
|
716 |
+
# Examples
|
717 |
+
with gr.Accordion("π‘ Examples", open=True):
|
|
|
|
|
718 |
examples = [
|
719 |
+
["Explain quantum computing", 250, 0.7, 0.9, 8, True],
|
720 |
+
["Write a Python sorting algorithm", 200, 0.5, 0.8, 10, True],
|
721 |
+
["What are the symptoms of diabetes?", 200, 0.6, 0.9, 12, True],
|
722 |
+
["Create a marketing strategy", 300, 0.8, 0.9, 8, True],
|
|
|
|
|
|
|
|
|
723 |
]
|
724 |
|
725 |
gr.Examples(
|
|
|
727 |
inputs=[prompt_input, max_length, temperature, top_p, num_encoders, show_routing],
|
728 |
outputs=[response_output, routing_output],
|
729 |
fn=generate_response,
|
730 |
+
cache_examples=False
|
|
|
731 |
)
|
732 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
733 |
# Event handlers
|
734 |
generate_btn.click(
|
735 |
fn=generate_response,
|
736 |
inputs=[prompt_input, max_length, temperature, top_p, num_encoders, show_routing],
|
737 |
+
outputs=[response_output, routing_output]
|
|
|
738 |
)
|
739 |
|
740 |
+
refresh_btn.click(fn=show_model_info, outputs=model_info_display)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
741 |
|
742 |
# Footer
|
743 |
gr.Markdown("""
|
744 |
---
|
745 |
+
### π Production Features
|
746 |
+
- **Automatic Model Selection** based on system resources
|
747 |
+
- **GPU Acceleration** with memory optimization
|
748 |
+
- **Intelligent Routing** across specialized encoders
|
749 |
+
- **Comprehensive Error Handling** with graceful fallbacks
|
750 |
+
- **Performance Monitoring** and real-time statistics
|
751 |
+
- **Domain-Aware Processing** for specialized responses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
752 |
""")
|
753 |
|
754 |
return demo
|
755 |
|
756 |
+
|
757 |
if __name__ == "__main__":
|
|
|
758 |
try:
|
759 |
demo = create_production_demo()
|
760 |
|
761 |
+
# Production launch settings
|
762 |
launch_kwargs = {
|
763 |
"server_name": "0.0.0.0",
|
764 |
"server_port": 7860,
|
765 |
+
"share": False,
|
766 |
"debug": False,
|
767 |
"show_error": True,
|
768 |
+
"quiet": False
|
769 |
}
|
770 |
|
771 |
+
# Check Gradio version compatibility
|
772 |
try:
|
|
|
|
|
773 |
import inspect
|
774 |
launch_signature = inspect.signature(gr.Blocks.launch)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
775 |
if 'max_threads' in launch_signature.parameters:
|
776 |
launch_kwargs['max_threads'] = 10
|
777 |
+
except:
|
778 |
+
pass
|
|
|
779 |
|
780 |
+
logger.info(f"π Launching production demo...")
|
|
|
781 |
demo.launch(**launch_kwargs)
|
782 |
|
783 |
except Exception as e:
|
784 |
+
logger.error(f"β Launch failed: {e}")
|
785 |
print(f"β Demo launch failed: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|