Malaji71 committed on
Commit 6715d2b · verified · 1 Parent(s): 8723d89

Update config.py

Files changed (1):
  1. config.py +87 -32
config.py CHANGED
@@ -1,6 +1,6 @@
 """
-Configuration file for FLUX Prompt Optimizer
-Clean, simple, and focused configuration
 """
 
 import os
@@ -9,67 +9,103 @@ from typing import Dict, Any
 
 # Application Configuration
 APP_CONFIG = {
-    "title": "🚀 FLUX Prompt Optimizer",
-    "description": "Advanced image analysis and Flux prompt optimization",
     "version": "2.0.0",
-    "author": "Pariente AI Research"
 }
 
-# Model Configuration
-MODEL_CONFIG = {
-    # Primary analysis model - choose one
-    "primary_model": "florence2",  # or "bagel"
 
-    # Florence-2 settings
-    "florence2": {
-        "model_id": "microsoft/Florence-2-base",
-        "torch_dtype": torch.float16,
-        "device_map": "auto",
-        "trust_remote_code": True,
-        "max_new_tokens": 1024
-    },
 
-    # Bagel-7B settings (via API)
-    "bagel": {
-        "api_url": "https://huggingface.co/spaces/Malaji71/Bagel-7B-Demo",
-        "timeout": 30,
-        "max_retries": 3
-    }
 }
 
-# Device Configuration
 def get_device_config() -> Dict[str, Any]:
-    """Determine optimal device configuration"""
     device_config = {
         "device": "cpu",
         "use_gpu": False,
         "memory_efficient": True
     }
 
     if torch.cuda.is_available():
         device_config.update({
             "device": "cuda",
             "use_gpu": True,
-            "gpu_memory_gb": torch.cuda.get_device_properties(0).total_memory / 1e9
         })
     elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
         device_config.update({
             "device": "mps",
-            "use_gpu": True
         })
 
     return device_config
 
 # Processing Configuration
 PROCESSING_CONFIG = {
     "max_image_size": 1024,
     "image_quality": 95,
     "supported_formats": [".jpg", ".jpeg", ".png", ".webp"],
     "batch_size": 1,
-    "timeout_seconds": 60
 }
 
-# Flux Prompt Rules
 FLUX_RULES = {
     "remove_patterns": [
         r',\s*trending on artstation',
@@ -121,16 +157,35 @@ ENVIRONMENT = {
     "is_spaces": os.getenv("SPACE_ID") is not None,
     "is_local": os.getenv("SPACE_ID") is None,
     "log_level": os.getenv("LOG_LEVEL", "INFO"),
-    "debug_mode": os.getenv("DEBUG", "false").lower() == "true"
 }
 
 # Export main configurations
 __all__ = [
     "APP_CONFIG",
-    "MODEL_CONFIG",
     "get_device_config",
     "PROCESSING_CONFIG",
     "FLUX_RULES",
     "SCORING_CONFIG",
-    "ENVIRONMENT"
 ]
 
@@ -1,6 +1,6 @@
 """
+Configuration file for Frame 0 Laboratory for MIA
+BAGEL 7B integration with FLUX prompt optimization
 """
 
 import os
@@ -9,67 +9,103 @@ from typing import Dict, Any
 
 # Application Configuration
 APP_CONFIG = {
+    "title": "Frame 0 Laboratory for MIA",
+    "description": "Advanced image analysis with BAGEL 7B and FLUX prompt optimization",
     "version": "2.0.0",
+    "author": "Frame 0 Laboratory for MIA"
 }
 
+# BAGEL Model Configuration
+BAGEL_CONFIG = {
+    "model_repo": "ByteDance-Seed/BAGEL-7B-MoT",
+    "local_model_path": "./model",
+    "cache_dir": "./model/cache",
+    "download_patterns": ["*.json", "*.safetensors", "*.bin", "*.py", "*.md", "*.txt"],
+
+    # Model parameters
+    "dtype": torch.bfloat16,
+    "device_map_strategy": "auto",
+    "max_memory_per_gpu": "80GiB",
+    "offload_buffers": True,
+    "force_hooks": True,
+
+    # Image processing
+    "vae_transform_size": (1024, 512, 16),
+    "vit_transform_size": (980, 224, 14),
+
+    # Inference parameters
+    "max_new_tokens": 512,
+    "temperature": 0.7,
+    "top_p": 0.9,
+    "do_sample": True
 }
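
For context, the repo, cache, and pattern keys above line up one-to-one with huggingface_hub's snapshot_download arguments. A minimal sketch of the download step, assuming the app fetches weights this way (the helper name download_bagel_weights is illustrative, not part of this commit):

import os
from huggingface_hub import snapshot_download

def download_bagel_weights(config: dict) -> str:
    # Illustrative: fetch only the files matching BAGEL_CONFIG["download_patterns"].
    os.makedirs(config["local_model_path"], exist_ok=True)
    return snapshot_download(
        repo_id=config["model_repo"],          # "ByteDance-Seed/BAGEL-7B-MoT"
        local_dir=config["local_model_path"],  # "./model"
        cache_dir=config["cache_dir"],         # "./model/cache"
        allow_patterns=config["download_patterns"],
    )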

+# Device Configuration for ZeroGPU
 def get_device_config() -> Dict[str, Any]:
+    """Determine optimal device configuration for BAGEL"""
     device_config = {
         "device": "cpu",
         "use_gpu": False,
+        "gpu_count": 0,
         "memory_efficient": True
     }
 
     if torch.cuda.is_available():
+        gpu_count = torch.cuda.device_count()
         device_config.update({
             "device": "cuda",
             "use_gpu": True,
+            "gpu_count": gpu_count,
+            "gpu_memory_gb": torch.cuda.get_device_properties(0).total_memory / 1e9,
+            "multi_gpu": gpu_count > 1
         })
     elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
         device_config.update({
             "device": "mps",
+            "use_gpu": True,
+            "gpu_count": 1
         })
 
     return device_config
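
Callers can use the returned report to pick a safe dtype before loading. A short usage sketch (the float32-on-CPU fallback is an assumption, not something this commit specifies):

dev = get_device_config()
# bfloat16 from BAGEL_CONFIG only makes sense on an accelerator.
dtype = BAGEL_CONFIG["dtype"] if dev["use_gpu"] else torch.float32
print(f"device={dev['device']}, gpus={dev['gpu_count']}, dtype={dtype}")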

+# BAGEL Device Mapping Configuration
+def get_bagel_device_map(gpu_count: int) -> Dict[str, str]:
+    """Configure device mapping for BAGEL model"""
+    # Modules that must live on the same GPU as each other
+    same_device_modules = [
+        'language_model.model.embed_tokens',
+        'time_embedder',
+        'latent_pos_embed',
+        'vae2llm',
+        'llm2vae',
+        'connector',
+        'vit_pos_embed'
+    ]
+
+    device_map = {}
+
+    if gpu_count == 1:
+        # Single-GPU configuration
+        for module in same_device_modules:
+            device_map[module] = "cuda:0"
+    else:
+        # Multi-GPU configuration - keep the critical modules on one device
+        first_device = "cuda:0"
+        for module in same_device_modules:
+            device_map[module] = first_device
+
+    return device_map
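
Note the returned map only pins the shared modules; everything else is left to accelerate's "auto" placement (cf. "device_map_strategy" above). A sketch of how the two maps might be merged at load time, assuming `model` is a BAGEL module already instantiated with empty weights by the loading code:

from accelerate import infer_auto_device_map, load_checkpoint_and_dispatch

gpu_count = get_device_config()["gpu_count"]
device_map = infer_auto_device_map(
    model,  # assumed: empty-weights BAGEL model
    max_memory={i: BAGEL_CONFIG["max_memory_per_gpu"] for i in range(gpu_count)},
)
device_map.update(get_bagel_device_map(gpu_count))  # pin the shared modules
model = load_checkpoint_and_dispatch(
    model,
    checkpoint=BAGEL_CONFIG["local_model_path"],
    device_map=device_map,
    offload_buffers=BAGEL_CONFIG["offload_buffers"],
    force_hooks=BAGEL_CONFIG["force_hooks"],
)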

 # Processing Configuration
 PROCESSING_CONFIG = {
     "max_image_size": 1024,
     "image_quality": 95,
     "supported_formats": [".jpg", ".jpeg", ".png", ".webp"],
     "batch_size": 1,
+    "timeout_seconds": 120  # Increased for BAGEL processing
 }
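
As an illustration of how these limits might be enforced before analysis (load_and_resize is a hypothetical helper, not part of this commit):

from pathlib import Path
from PIL import Image

def load_and_resize(path: str) -> Image.Image:
    # Reject extensions outside supported_formats, then cap the longest side.
    if Path(path).suffix.lower() not in PROCESSING_CONFIG["supported_formats"]:
        raise ValueError(f"Unsupported format: {path}")
    image = Image.open(path).convert("RGB")
    image.thumbnail((PROCESSING_CONFIG["max_image_size"],) * 2)  # keeps aspect ratio
    return image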

+# FLUX Prompt Rules
 FLUX_RULES = {
     "remove_patterns": [
         r',\s*trending on artstation',
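
(The hunk is truncated here, so only the first pattern is visible.) The cleanup pass these rules feed is a simple substitution loop, sketched below on the assumption that remove_patterns holds case-insensitive regexes and apply_flux_rules is a hypothetical helper:

import re

def apply_flux_rules(prompt: str) -> str:
    # Strip each unwanted pattern, then tidy stray whitespace.
    for pattern in FLUX_RULES["remove_patterns"]:
        prompt = re.sub(pattern, "", prompt, flags=re.IGNORECASE)
    return prompt.strip()

apply_flux_rules("a cat, trending on artstation")  # -> "a cat"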
 
@@ -121,16 +157,35 @@ ENVIRONMENT = {
     "is_spaces": os.getenv("SPACE_ID") is not None,
     "is_local": os.getenv("SPACE_ID") is None,
     "log_level": os.getenv("LOG_LEVEL", "INFO"),
+    "debug_mode": os.getenv("DEBUG", "false").lower() == "true",
+    "space_id": os.getenv("SPACE_ID", ""),
+    "space_author": os.getenv("SPACE_AUTHOR_NAME", "")
+}
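
These flags let Spaces-specific behavior key off ENVIRONMENT rather than scattered os.getenv calls, e.g. (a sketch):

if ENVIRONMENT["is_spaces"]:
    print(f"Running in Space {ENVIRONMENT['space_id']}")
if ENVIRONMENT["debug_mode"]:
    import logging
    logging.getLogger().setLevel(logging.DEBUG)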

+# BAGEL Inference Prompts
+BAGEL_PROMPTS = {
+    "image_analysis": "Describe this image in detail, including objects, people, setting, mood, and visual elements:",
+    "flux_prompt": "Generate a detailed FLUX prompt for this image, focusing on photographic and artistic elements:",
+    "detailed_description": "Provide a comprehensive analysis of this image including composition, lighting, colors, and artistic style:",
+}
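
A lookup with a default covers callers that pass an unknown task name (build_instruction is illustrative):

def build_instruction(task: str) -> str:
    # Fall back to the generic analysis prompt for unknown tasks.
    return BAGEL_PROMPTS.get(task, BAGEL_PROMPTS["image_analysis"])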

+# Flash Attention Installation Command
+FLASH_ATTN_INSTALL = {
+    "command": "pip install flash-attn --no-build-isolation",
+    "env": {"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+    "shell": True
 }
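
The dict is shaped for subprocess.run; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE tells flash-attn's setup to skip compiling CUDA extensions at install time, the usual workaround on Spaces. A sketch of the runner (install_flash_attn is illustrative):

import os
import subprocess

def install_flash_attn() -> None:
    # Merge the skip-build flag into the current environment, then run pip.
    env = {**os.environ, **FLASH_ATTN_INSTALL["env"]}
    subprocess.run(
        FLASH_ATTN_INSTALL["command"],
        shell=FLASH_ATTN_INSTALL["shell"],
        env=env,
        check=True,
    )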

 # Export main configurations
 __all__ = [
     "APP_CONFIG",
+    "BAGEL_CONFIG",
     "get_device_config",
+    "get_bagel_device_map",
     "PROCESSING_CONFIG",
     "FLUX_RULES",
     "SCORING_CONFIG",
+    "ENVIRONMENT",
+    "BAGEL_PROMPTS",
+    "FLASH_ATTN_INSTALL"
 ]