"""
Configuration file for FLUX Prompt Optimizer
Clean, simple, and focused configuration
"""

import os
import torch
from typing import Dict, Any

# Application Configuration
APP_CONFIG = {
    "title": "🚀 FLUX Prompt Optimizer",
    "description": "Advanced image analysis and Flux prompt optimization",
    "version": "2.0.0",
    "author": "Pariente AI Research"
}

# Model Configuration
MODEL_CONFIG = {
    # Primary analysis model - choose one
    "primary_model": "florence2",  # or "bagel" 
    
    # Florence-2 settings
    "florence2": {
        "model_id": "microsoft/Florence-2-base",
        "torch_dtype": torch.float16,
        "device_map": "auto",
        "trust_remote_code": True,
        "max_new_tokens": 1024
    },
    
    # Bagel-7B settings (via API)
    "bagel": {
        "api_url": "https://huggingface.co/spaces/Malaji71/Bagel-7B-Demo",
        "timeout": 30,
        "max_retries": 3
    }
}
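
# Illustrative sketch (assumption, not part of the original configuration):
# one way the Florence-2 settings above could be handed to Hugging Face
# transformers. The loader function below is hypothetical and only documents
# how MODEL_CONFIG["florence2"] is meant to be consumed; device_map support
# additionally requires the `accelerate` package.
def _load_florence2_example():
    from transformers import AutoModelForCausalLM, AutoProcessor  # local import keeps config import-light
    cfg = MODEL_CONFIG["florence2"]
    model = AutoModelForCausalLM.from_pretrained(
        cfg["model_id"],
        torch_dtype=cfg["torch_dtype"],
        device_map=cfg["device_map"],
        trust_remote_code=cfg["trust_remote_code"],
    )
    processor = AutoProcessor.from_pretrained(
        cfg["model_id"], trust_remote_code=cfg["trust_remote_code"]
    )
    # cfg["max_new_tokens"] is a generation-time setting and would be passed
    # to model.generate(...) at inference time (assumption).
    return model, processor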

# Device Configuration
def get_device_config() -> Dict[str, Any]:
    """Determine optimal device configuration"""
    device_config = {
        "device": "cpu",
        "use_gpu": False,
        "memory_efficient": True
    }
    
    if torch.cuda.is_available():
        device_config.update({
            "device": "cuda",
            "use_gpu": True,
            "gpu_memory_gb": torch.cuda.get_device_properties(0).total_memory / 1e9
        })
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        device_config.update({
            "device": "mps",
            "use_gpu": True
        })
    
    return device_config
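
# Illustrative usage (assumption: callers resolve a torch device from this
# dict at startup, e.g.):
#
#     device_cfg = get_device_config()
#     device = torch.device(device_cfg["device"])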

# Processing Configuration
PROCESSING_CONFIG = {
    "max_image_size": 1024,
    "image_quality": 95,
    "supported_formats": [".jpg", ".jpeg", ".png", ".webp"],
    "batch_size": 1,
    "timeout_seconds": 60
}
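
# Illustrative sketch (assumption, not part of the original configuration):
# how PROCESSING_CONFIG might be applied when an uploaded image is prepared
# for analysis. The helper name is hypothetical and only documents intent;
# image_quality would apply when re-encoding to JPEG (assumption).
def _prepare_image_example(path: str):
    from PIL import Image  # local import: Pillow is only needed by image-handling code
    ext = os.path.splitext(path)[1].lower()
    if ext not in PROCESSING_CONFIG["supported_formats"]:
        raise ValueError(f"Unsupported format: {ext}")
    image = Image.open(path).convert("RGB")
    # Downscale in place so the longest side stays within max_image_size
    max_side = PROCESSING_CONFIG["max_image_size"]
    image.thumbnail((max_side, max_side))
    return image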

# Flux Prompt Rules
FLUX_RULES = {
    "remove_patterns": [
        r',\s*trending on artstation',
        r',\s*trending on [^,]+',
        r',\s*\d+k\s*',
        r',\s*\d+k resolution',
        r',\s*artstation',
        r',\s*concept art',
        r',\s*digital art',
        r',\s*by greg rutkowski',
    ],
    
    "camera_configs": {
        "portrait": ", Shot on Hasselblad X2D 100C, 90mm f/2.5 lens at f/2.8, professional portrait photography",
        "landscape": ", Shot on Phase One XT, 40mm f/4 lens at f/8, epic landscape photography", 
        "street": ", Shot on Leica M11, 35mm f/1.4 lens at f/2.8, documentary street photography",
        "default": ", Shot on Phase One XF IQ4, 80mm f/2.8 lens at f/4, professional photography"
    },
    
    "lighting_enhancements": {
        "dramatic": ", dramatic cinematic lighting",
        "portrait": ", professional studio lighting with subtle rim light",
        "default": ", masterful natural lighting"
    }
}
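
# Illustrative sketch (assumption, not part of the original configuration):
# one plausible way FLUX_RULES could be applied to a raw prompt. The function
# name and the shot_type parameter are hypothetical and exist only to show
# how the three rule groups above fit together.
def _apply_flux_rules_example(prompt: str, shot_type: str = "default") -> str:
    import re  # local import so the module-level config stays dependency-free
    cleaned = prompt
    for pattern in FLUX_RULES["remove_patterns"]:
        cleaned = re.sub(pattern, "", cleaned, flags=re.IGNORECASE)
    camera = FLUX_RULES["camera_configs"].get(
        shot_type, FLUX_RULES["camera_configs"]["default"]
    )
    lighting = FLUX_RULES["lighting_enhancements"].get(
        shot_type, FLUX_RULES["lighting_enhancements"]["default"]
    )
    return cleaned.strip().rstrip(",") + camera + lighting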

# Scoring Configuration
SCORING_CONFIG = {
    "max_score": 100,
    "score_weights": {
        "prompt_quality": 0.3,
        "technical_details": 0.25,
        "artistic_value": 0.25,
        "flux_optimization": 0.2
    },
    
    "grade_thresholds": {
        95: {"grade": "LEGENDARY", "color": "#059669"},
        90: {"grade": "EXCELLENT", "color": "#10b981"},
        80: {"grade": "VERY GOOD", "color": "#22c55e"},
        70: {"grade": "GOOD", "color": "#f59e0b"},
        60: {"grade": "FAIR", "color": "#f97316"},
        0: {"grade": "NEEDS WORK", "color": "#ef4444"}
    }
}
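
# Illustrative sketch (assumption, not part of the original configuration):
# grade_thresholds is keyed by minimum score, so a lookup walks the keys from
# highest to lowest. The helper name is hypothetical.
def _grade_for_score_example(score: float) -> Dict[str, str]:
    for threshold in sorted(SCORING_CONFIG["grade_thresholds"], reverse=True):
        if score >= threshold:
            return SCORING_CONFIG["grade_thresholds"][threshold]
    return SCORING_CONFIG["grade_thresholds"][0]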

# Environment Configuration
ENVIRONMENT = {
    "is_spaces": os.getenv("SPACE_ID") is not None,
    "is_local": os.getenv("SPACE_ID") is None,
    "log_level": os.getenv("LOG_LEVEL", "INFO"),
    "debug_mode": os.getenv("DEBUG", "false").lower() == "true"
}

# Export main configurations
__all__ = [
    "APP_CONFIG",
    "MODEL_CONFIG", 
    "get_device_config",
    "PROCESSING_CONFIG",
    "FLUX_RULES",
    "SCORING_CONFIG",
    "ENVIRONMENT"
]