| """ | |
| Main processing logic for Phramer AI | |
| By Pariente AI, for MIA TV Series | |
| Enhanced image analysis with professional cinematography integration and multi-engine optimization | |
| """ | |
| import logging | |
| import time | |
| from typing import Tuple, Dict, Any, Optional | |
| from PIL import Image | |
| from datetime import datetime | |
| from config import APP_CONFIG, PROCESSING_CONFIG, get_device_config, PROFESSIONAL_PHOTOGRAPHY_CONFIG | |
| from utils import ( | |
| optimize_image, validate_image, apply_flux_rules, | |
| calculate_prompt_score, get_score_grade, format_analysis_report, | |
| clean_memory, safe_execute, detect_scene_type_from_analysis, | |
| enhance_prompt_with_cinematography_knowledge | |
| ) | |
| from models import analyze_image | |
| logger = logging.getLogger(__name__) | |
class PhramerlAIOptimizer:
    """Main optimizer class for Phramer AI prompt generation with cinematography integration"""

    def __init__(self, model_name: Optional[str] = None):
        self.model_name = model_name
        self.device_config = get_device_config()
        self.processing_stats = {
            "total_processed": 0,
            "successful_analyses": 0,
            "failed_analyses": 0,
            "average_processing_time": 0.0,
            "cinematography_enhancements": 0,
            "scene_types_detected": {}
        }
        logger.info(f"Phramer AI Optimizer initialized - Device: {self.device_config['device']}")
    def process_image(self, image: Any, analysis_type: str = "multiengine") -> Tuple[str, str, str, Dict[str, Any]]:
        """
        Complete image processing pipeline with cinematography enhancement

        Args:
            image: Input image (PIL, numpy array, or file path)
            analysis_type: Type of analysis ("multiengine", "cinematic", "flux")

        Returns:
            Tuple of (optimized_prompt, analysis_report, score_html, metadata)
        """
        start_time = time.time()
        metadata = {
            "processing_time": 0.0,
            "success": False,
            "model_used": self.model_name or "bagel-professional",
            "device": self.device_config["device"],
            "analysis_type": analysis_type,
            "cinematography_enhanced": False,
            "scene_type": "unknown",
            "error": None
        }
        try:
            # Step 1: Validate and optimize input image
            logger.info(f"Starting Phramer AI processing pipeline - Analysis type: {analysis_type}")

            if not validate_image(image):
                error_msg = "Invalid or unsupported image format"
                logger.error(error_msg)
                return self._create_error_response(error_msg, metadata)

            optimized_image = optimize_image(image)
            if optimized_image is None:
                error_msg = "Image optimization failed"
                logger.error(error_msg)
                return self._create_error_response(error_msg, metadata)

            logger.info(f"Image optimized to size: {optimized_image.size}")

            # Step 2: Enhanced image analysis with cinematography context
            logger.info("Running enhanced BAGEL analysis with cinematography integration...")
            analysis_success, analysis_result = safe_execute(
                analyze_image,
                optimized_image,
                self.model_name,
                analysis_type
            )

            if not analysis_success:
                error_msg = f"Enhanced image analysis failed: {analysis_result}"
                logger.error(error_msg)
                return self._create_error_response(error_msg, metadata)

            description, analysis_metadata = analysis_result
            logger.info(f"Enhanced analysis complete: {len(description)} characters")

            # Step 3: Detect scene type and apply cinematography enhancements
            scene_type = detect_scene_type_from_analysis(analysis_metadata)
            metadata["scene_type"] = scene_type

            # Update scene statistics
            if scene_type in self.processing_stats["scene_types_detected"]:
                self.processing_stats["scene_types_detected"][scene_type] += 1
            else:
                self.processing_stats["scene_types_detected"][scene_type] = 1

            logger.info(f"Scene type detected: {scene_type}")

            # Step 4: Apply enhanced FLUX optimization with cinematography knowledge
            logger.info("Applying enhanced multi-engine optimization...")
            optimized_prompt = apply_flux_rules(description, analysis_metadata)

            # Step 5: Additional cinematography enhancement if enabled
            if PROFESSIONAL_PHOTOGRAPHY_CONFIG.get("enable_expert_analysis", True):
                logger.info("Applying professional cinematography enhancement...")
                optimized_prompt = enhance_prompt_with_cinematography_knowledge(optimized_prompt, scene_type)
                metadata["cinematography_enhanced"] = True
                self.processing_stats["cinematography_enhancements"] += 1

            if not optimized_prompt:
                optimized_prompt = "A professional cinematic photograph with technical excellence"
                logger.warning("Empty prompt after optimization, using cinematography fallback")

            # Step 6: Calculate enhanced quality score
            logger.info("Calculating professional quality score...")
            score, score_breakdown = calculate_prompt_score(optimized_prompt, analysis_metadata)
            grade_info = get_score_grade(score)

            # Step 7: Generate comprehensive analysis report
            processing_time = time.time() - start_time
            metadata.update({
                "processing_time": processing_time,
                "success": True,
                "prompt_length": len(optimized_prompt),
                "score": score,
                "grade": grade_info["grade"],
                "analysis_metadata": analysis_metadata,
                "score_breakdown": score_breakdown,
                "has_camera_suggestion": analysis_metadata.get("has_camera_suggestion", False),
                "professional_enhancement": analysis_metadata.get("professional_enhancement", False)
            })

            analysis_report = self._generate_enhanced_report(
                optimized_prompt, analysis_metadata, score,
                score_breakdown, processing_time, scene_type
            )

            # Step 8: Create enhanced score HTML
            score_html = self._generate_enhanced_score_html(score, grade_info, scene_type)

            # Update statistics
            self._update_stats(processing_time, True)

            logger.info(f"Phramer AI processing complete - Scene: {scene_type}, Score: {score}, Time: {processing_time:.1f}s")
            return optimized_prompt, analysis_report, score_html, metadata

        except Exception as e:
            processing_time = time.time() - start_time
            error_msg = f"Unexpected error in Phramer AI pipeline: {str(e)}"
            logger.error(error_msg, exc_info=True)

            metadata.update({
                "processing_time": processing_time,
                "error": error_msg
            })
            self._update_stats(processing_time, False)
            return self._create_error_response(error_msg, metadata)

        finally:
            # Always clean up memory
            clean_memory()

    def process_for_cinematic(self, image: Any) -> Tuple[str, str, str, Dict[str, Any]]:
        """Process image specifically for cinematic/MIA TV Series production"""
        return self.process_image(image, analysis_type="cinematic")

    def process_for_flux(self, image: Any) -> Tuple[str, str, str, Dict[str, Any]]:
        """Process image specifically for FLUX generation"""
        return self.process_image(image, analysis_type="flux")

    def process_for_multiengine(self, image: Any) -> Tuple[str, str, str, Dict[str, Any]]:
        """Process image for multi-engine compatibility (Flux, Midjourney, etc.)"""
        return self.process_image(image, analysis_type="multiengine")

    def _create_error_response(self, error_msg: str, metadata: Dict[str, Any]) -> Tuple[str, str, str, Dict[str, Any]]:
        """Create standardized error response"""
        error_prompt = "❌ Phramer AI processing failed"
        error_report = f"""**Error:** {error_msg}

**Troubleshooting:**
• Verify image format (JPG, PNG, WebP)
• Check image size (max 1024px)
• Ensure stable internet connection
• Try with a different image

**Support:** Contact Pariente AI technical team"""
        error_html = self._generate_enhanced_score_html(0, get_score_grade(0), "error")

        metadata["success"] = False
        metadata["error"] = error_msg

        return error_prompt, error_report, error_html, metadata

    def _generate_enhanced_report(self, prompt: str, analysis_metadata: Dict[str, Any],
                                  score: int, breakdown: Dict[str, int],
                                  processing_time: float, scene_type: str) -> str:
        """Generate comprehensive analysis report with cinematography insights"""
        model_used = analysis_metadata.get("model", "Unknown")
        device_used = analysis_metadata.get("device", self.device_config["device"])
        confidence = analysis_metadata.get("confidence", 0.0)
        has_cinema_context = analysis_metadata.get("cinematography_context_applied", False)
        camera_setup = analysis_metadata.get("camera_setup", "Not detected")

        # Device status emoji
        device_emoji = "⚡" if device_used == "cuda" else "💻"
        cinema_emoji = "🎬" if has_cinema_context else "📸"

        report = f"""**{cinema_emoji} PHRAMER AI ANALYSIS COMPLETE**

**Processing:** {device_emoji} {device_used.upper()} • {processing_time:.1f}s • Model: {model_used}
**Score:** {score}/100 • Scene: {scene_type.replace('_', ' ').title()} • Confidence: {confidence:.0%}

**🎯 SCORE BREAKDOWN:**
• **Prompt Quality:** {breakdown.get('prompt_quality', 0)}/25 - Content detail and structure
• **Technical Details:** {breakdown.get('technical_details', 0)}/25 - Camera and equipment specs
• **Professional Cinematography:** {breakdown.get('professional_cinematography', 0)}/25 - Cinema expertise applied
• **Multi-Engine Optimization:** {breakdown.get('multi_engine_optimization', 0)}/25 - Platform compatibility

**🎬 CINEMATOGRAPHY ANALYSIS:**
**Scene Type:** {scene_type.replace('_', ' ').title()}
**Camera Setup:** {camera_setup}
**Professional Context:** {'✅ Applied' if has_cinema_context else '❌ Basic'}

**⚙️ OPTIMIZATIONS APPLIED:**
✅ Professional camera configuration
✅ Cinematography lighting principles
✅ Technical specifications enhanced
✅ Multi-engine compatibility (Flux, Midjourney)
✅ Cinema-quality terminology
✅ Scene-specific enhancements

**📊 PERFORMANCE METRICS:**
• **Processing Time:** {processing_time:.1f}s
• **Device:** {device_used.upper()}
• **Model Confidence:** {confidence:.0%}
• **Prompt Length:** {len(prompt)} characters
• **Enhancement Level:** {'Professional' if has_cinema_context else 'Standard'}

**🏆 COMPATIBILITY:**
• **FLUX:** ✅ Optimized
• **Midjourney:** ✅ Compatible
• **Stable Diffusion:** ✅ Ready
• **Other Engines:** ✅ Universal format

**Pariente AI • MIA TV Series • 30+ Years Cinema Experience**"""

        return report

    def _generate_enhanced_score_html(self, score: int, grade_info: Dict[str, str], scene_type: str) -> str:
        """Generate enhanced HTML for score display with cinematography context"""
        # Scene type emoji
        scene_emojis = {
            "cinematic": "🎬",
            "portrait": "👤",
            "landscape": "🏔️",
            "street": "🏙️",
            "architectural": "🏛️",
            "commercial": "💼",
            "error": "❌"
        }
        scene_emoji = scene_emojis.get(scene_type, "📸")

        html = f'''
        <div style="text-align: center; padding: 2rem; background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%); border: 3px solid {grade_info["color"]}; border-radius: 16px; margin: 1rem 0; box-shadow: 0 8px 25px -5px rgba(0, 0, 0, 0.1);">
            <div style="font-size: 2.5rem; margin-bottom: 0.5rem;">{scene_emoji}</div>
            <div style="font-size: 3rem; font-weight: 800; color: {grade_info["color"]}; margin: 0; text-shadow: 0 2px 4px rgba(0,0,0,0.1);">{score}</div>
            <div style="font-size: 1.25rem; color: #15803d; margin: 0.5rem 0; text-transform: uppercase; letter-spacing: 0.1em; font-weight: 700;">{grade_info["grade"]}</div>
            <div style="font-size: 0.9rem; color: #15803d; margin: 0; text-transform: capitalize; letter-spacing: 0.05em; font-weight: 500;">{scene_type.replace('_', ' ')} Scene</div>
            <div style="font-size: 0.8rem; color: #15803d; margin: 0.5rem 0 0 0; text-transform: uppercase; letter-spacing: 0.05em; font-weight: 500;">Phramer AI Quality</div>
        </div>
        '''
        return html

    def _update_stats(self, processing_time: float, success: bool) -> None:
        """Update processing statistics with cinematography tracking"""
        self.processing_stats["total_processed"] += 1

        if success:
            self.processing_stats["successful_analyses"] += 1
        else:
            self.processing_stats["failed_analyses"] += 1

        # Update rolling average of processing time using the incremental mean:
        # new_avg = (old_avg * (n - 1) + x) / n, where n is the updated total count
        current_avg = self.processing_stats["average_processing_time"]
        total_count = self.processing_stats["total_processed"]
        self.processing_stats["average_processing_time"] = (
            (current_avg * (total_count - 1) + processing_time) / total_count
        )

    def get_enhanced_stats(self) -> Dict[str, Any]:
        """Get enhanced processing statistics with cinematography insights"""
        stats = self.processing_stats.copy()

        if stats["total_processed"] > 0:
            stats["success_rate"] = stats["successful_analyses"] / stats["total_processed"]
            stats["cinematography_enhancement_rate"] = stats["cinematography_enhancements"] / stats["total_processed"]
        else:
            stats["success_rate"] = 0.0
            stats["cinematography_enhancement_rate"] = 0.0

        stats["device_info"] = self.device_config
        stats["most_common_scene"] = (
            max(stats["scene_types_detected"].items(), key=lambda x: x[1])[0]
            if stats["scene_types_detected"] else "none"
        )
        return stats

    def reset_stats(self) -> None:
        """Reset processing statistics"""
        self.processing_stats = {
            "total_processed": 0,
            "successful_analyses": 0,
            "failed_analyses": 0,
            "average_processing_time": 0.0,
            "cinematography_enhancements": 0,
            "scene_types_detected": {}
        }
        logger.info("Phramer AI processing statistics reset")

class CinematicBatchProcessor:
    """Handle batch processing for MIA TV Series production"""

    def __init__(self, optimizer: PhramerlAIOptimizer):
        self.optimizer = optimizer
        self.batch_results = []
        self.batch_stats = {
            "total_images": 0,
            "successful_analyses": 0,
            "scene_type_distribution": {},
            "average_score": 0.0,
            "processing_time_total": 0.0
        }
    def process_cinematic_batch(self, images: list, analysis_type: str = "cinematic") -> list:
        """Process multiple images for cinematic production"""
        results = []
        total_score = 0
        successful_count = 0

        logger.info(f"Starting cinematic batch processing: {len(images)} images")

        for i, image in enumerate(images):
            logger.info(f"Processing cinematic batch item {i+1}/{len(images)}")
            try:
                if analysis_type == "cinematic":
                    result = self.optimizer.process_for_cinematic(image)
                elif analysis_type == "flux":
                    result = self.optimizer.process_for_flux(image)
                else:
                    result = self.optimizer.process_for_multiengine(image)

                success = result[3]["success"]
                if success:
                    score = result[3].get("score", 0)
                    scene_type = result[3].get("scene_type", "unknown")
                    total_score += score
                    successful_count += 1

                    # Update scene distribution
                    if scene_type in self.batch_stats["scene_type_distribution"]:
                        self.batch_stats["scene_type_distribution"][scene_type] += 1
                    else:
                        self.batch_stats["scene_type_distribution"][scene_type] = 1

                results.append({
                    "index": i,
                    "success": success,
                    "result": result,
                    "scene_type": result[3].get("scene_type", "unknown"),
                    "score": result[3].get("score", 0)
                })

            except Exception as e:
                logger.error(f"Cinematic batch item {i} failed: {e}")
                results.append({
                    "index": i,
                    "success": False,
                    "error": str(e),
                    "scene_type": "error",
                    "score": 0
                })

        # Update batch statistics
        self.batch_stats.update({
            "total_images": len(images),
            "successful_analyses": successful_count,
            "average_score": total_score / successful_count if successful_count > 0 else 0.0
        })

        self.batch_results = results
        logger.info(f"Cinematic batch processing complete: {successful_count}/{len(images)} successful")
        return results

    def get_cinematic_batch_summary(self) -> Dict[str, Any]:
        """Get comprehensive summary of cinematic batch processing"""
        if not self.batch_results:
            return {"total": 0, "successful": 0, "failed": 0, "average_score": 0.0}

        successful = sum(1 for r in self.batch_results if r["success"])
        total = len(self.batch_results)

        summary = {
            "total": total,
            "successful": successful,
            "failed": total - successful,
            "success_rate": successful / total if total > 0 else 0.0,
            "average_score": self.batch_stats["average_score"],
            "scene_distribution": self.batch_stats["scene_type_distribution"],
            "most_common_scene": (
                max(self.batch_stats["scene_type_distribution"].items(), key=lambda x: x[1])[0]
                if self.batch_stats["scene_type_distribution"] else "none"
            )
        }
        return summary
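
# Illustrative usage sketch for CinematicBatchProcessor (editor's example, not
# original module code; the image variables below are placeholder assumptions):
#
#     optimizer = PhramerlAIOptimizer()
#     batch = CinematicBatchProcessor(optimizer)
#     results = batch.process_cinematic_batch([image_a, image_b], analysis_type="cinematic")
#     summary = batch.get_cinematic_batch_summary()
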
# Global optimizer instance for Phramer AI
phramer_optimizer = PhramerlAIOptimizer()


def process_image_simple(image: Any, model_name: Optional[str] = None, analysis_type: str = "multiengine") -> Tuple[str, str, str]:
    """
    Simple interface for Phramer AI image processing

    Args:
        image: Input image
        model_name: Optional model name
        analysis_type: Type of analysis ("multiengine", "cinematic", "flux")

    Returns:
        Tuple of (prompt, report, score_html)
    """
    if model_name and model_name != phramer_optimizer.model_name:
        # Create temporary optimizer with specified model
        temp_optimizer = PhramerlAIOptimizer(model_name)
        prompt, report, score_html, _ = temp_optimizer.process_image(image, analysis_type)
    else:
        prompt, report, score_html, _ = phramer_optimizer.process_image(image, analysis_type)

    return prompt, report, score_html

def process_for_mia_tv_series(image: Any) -> Tuple[str, str, str]:
    """
    Specialized processing for MIA TV Series production

    Args:
        image: Input image

    Returns:
        Tuple of (cinematic_prompt, detailed_report, score_html)
    """
    return phramer_optimizer.process_for_cinematic(image)[:3]


def get_phramer_stats() -> Dict[str, Any]:
    """Get comprehensive Phramer AI processing statistics"""
    return phramer_optimizer.get_enhanced_stats()

# Export main components
__all__ = [
    "PhramerlAIOptimizer",
    "CinematicBatchProcessor",
    "phramer_optimizer",
    "process_image_simple",
    "process_for_mia_tv_series",
    "get_phramer_stats"
]