"""Gradio web app: upload a video and get a SmolVLM-generated analysis."""

import logging

import gradio as gr
import spaces
import torch

from video_processor.processor import VideoAnalyzer

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Log environment details once at startup (lazy %-args: formatting is
# deferred until the record is actually emitted).
logger.info("PyTorch version: %s", torch.__version__)
logger.info("CUDA available: %s", torch.cuda.is_available())
if torch.cuda.is_available():
    logger.info("CUDA version: %s", torch.version.cuda)
    logger.info("GPU device: %s", torch.cuda.get_device_name(0))


@spaces.GPU
def on_process(video):
    """Analyze ``video`` and stream progress updates to the UI.

    Generator callback for the "Process Video" button. Each ``yield`` is a
    3-item list matching the button's ``outputs`` wiring:
    ``[status_markdown, description_markdown, accordion_visibility_update]``.

    Args:
        video: Filesystem path of the uploaded video (falsy when nothing
            was uploaded).
    """
    # Clear all components when starting new processing.
    yield ["", "", gr.update(visible=False)]

    # Guard clause: nothing to do without an upload.
    if not video:
        yield ["Please upload a video", "", gr.update(visible=False)]
        return

    try:
        # Initialize analyzer.
        yield ["Initializing video analyzer...", "", gr.update(visible=False)]
        # NOTE(review): a fresh VideoAnalyzer is built per request; if model
        # loading is expensive, consider caching it across calls.
        analyzer = VideoAnalyzer()

        # Process video; reveal the accordion so progress is visible.
        yield ["Analyzing video content...", "", gr.update(visible=True)]
        logger.info("Processing video: %s", video)
        result = analyzer.process_video(video)
        description = result[0]["description"]

        # Format output as Markdown for the description panel.
        formatted_desc = f"### Analysis:\n{description}"
        yield ["Processing complete!", formatted_desc, gr.update(visible=True)]

    except Exception as e:
        # Broad catch is deliberate at this UI boundary: log the traceback
        # and surface a readable message instead of crashing the app.
        logger.exception("Error processing video")
        yield [f"Error processing video: {str(e)}", "", gr.update(visible=False)]
    finally:
        # Fix: only touch the CUDA allocator when CUDA is actually present;
        # previously empty_cache() was called unconditionally.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()


# Create Gradio interface.
with gr.Blocks() as demo:
    gr.Markdown("# SmolVLM Video Analyzer")
    gr.Markdown("Upload a video to get a detailed analysis of its content.")

    with gr.Row():
        with gr.Column(scale=1):
            input_video = gr.Video(
                label="Upload your video",
                interactive=True,
            )
            process_btn = gr.Button("Process Video", variant="primary")

        with gr.Column(scale=1):
            status = gr.Markdown()
            # Hidden until processing starts; on_process toggles visibility.
            analysis_accordion = gr.Accordion(
                "Analysis Details",
                open=True,
                visible=False,
            )
            with analysis_accordion:
                video_description = gr.Markdown("")

    # Order of outputs must match the lists yielded by on_process.
    process_btn.click(
        on_process,
        inputs=[input_video],
        outputs=[
            status,
            video_description,
            analysis_accordion,
        ],
        queue=True,
    )

if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )