File size: 3,657 Bytes
8535875
 
678fc41
8535875
678fc41
 
 
 
 
 
8535875
678fc41
 
 
8535875
678fc41
 
 
8535875
678fc41
 
 
 
 
 
 
8535875
678fc41
 
 
 
 
 
 
8535875
678fc41
8535875
0f519d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8535875
0f519d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8535875
678fc41
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import os
import tempfile

import cv2
import gradio as gr
import numpy as np

def create_dot_effect(image, dot_size=10, spacing=2, threshold=30):
    """Render *image* as a halftone-style pattern of white dots on black.

    Parameters:
        image: H x W grayscale or H x W x 3 RGB uint8 numpy array.
        dot_size: diameter (px) of each dot; also the side of the sampled cell.
        spacing: gap (px) between adjacent dot cells.
        threshold: minimum mean brightness (0-255) a cell needs to get a dot.
            Previously hard-coded to 30; now tunable with the same default.

    Returns:
        H x W uint8 array with white (255) filled circles on a black canvas.
    """
    # Reduce to a single luminance channel if a color image came in.
    if image.ndim == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    height, width = gray.shape
    canvas = np.zeros_like(gray)

    # Loop-invariant cell geometry, hoisted out of the double loop.
    step = dot_size + spacing
    radius = dot_size // 2

    for y in range(0, height, step):
        for x in range(0, width, step):
            # Mean brightness of the (possibly edge-clipped) cell.
            region = gray[y:min(y + dot_size, height), x:min(x + dot_size, width)]
            if region.size and np.mean(region) > threshold:
                # Filled circle; cv2 clips circles that overrun the canvas.
                cv2.circle(canvas,
                           (x + radius, y + radius),
                           radius,
                           (255),
                           -1)

    return canvas

def process_video(video_path, dot_size=10, spacing=2):
    """Apply the dot effect to every frame of a video and re-encode it.

    Parameters:
        video_path: path to the input video file.
        dot_size: forwarded to create_dot_effect.
        spacing: forwarded to create_dot_effect.

    Returns:
        Path to the processed .mp4 file, or None if the input can't be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None

    try:
        # Some containers report fps as 0; fall back so the output stays playable.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Unique temp file (instead of a fixed "temp_output.mp4") so concurrent
        # requests don't clobber each other's output.
        fd, output_path = tempfile.mkstemp(suffix=".mp4")
        os.close(fd)
        fourcc = cv2.VideoWriter_fourcc(*'avc1')  # h264 for browser playback
        # isColor=False: create_dot_effect returns single-channel frames.
        out = cv2.VideoWriter(output_path, fourcc, fps,
                              (frame_width, frame_height), False)
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                # OpenCV decodes BGR; convert so the effect samples RGB input.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                out.write(create_dot_effect(frame_rgb, dot_size, spacing))
        finally:
            # Release the writer even if a frame fails mid-stream.
            out.release()
    finally:
        cap.release()

    return output_path

# Create Gradio interface: two tabs (image / video), each wiring a pair of
# sliders plus an input widget into the matching processing function above.
with gr.Blocks(title="ChatGPT Ad Maker") as iface:
    gr.Markdown("# ChatGPT Ad Maker")
    gr.Markdown("Convert your image or video into a dotted pattern. Adjust dot size and spacing using the sliders.")
    
    with gr.Tab("Image"):
        image_input = gr.Image(label="Input Image")
        with gr.Row():
            # Slider defaults mirror create_dot_effect's parameter defaults.
            img_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
            img_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
        image_output = gr.Image(label="Dotted Output")
        image_button = gr.Button("Process Image")
        # Inputs are passed positionally: (image, dot_size, spacing).
        image_button.click(
            fn=create_dot_effect,
            inputs=[image_input, img_dot_size, img_spacing],
            outputs=image_output
        )
    
    with gr.Tab("Video"):
        video_input = gr.Video(label="Input Video")
        with gr.Row():
            # Same ranges/defaults as the image tab for a consistent UX.
            vid_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
            vid_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
        # process_video returns a file path; format="mp4" matches its output.
        video_output = gr.Video(label="Dotted Output", format="mp4")
        video_button = gr.Button("Process Video")
        video_button.click(
            fn=process_video,
            inputs=[video_input, vid_dot_size, vid_spacing],
            outputs=video_output
        )

# Launch the app only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()