import os
import cv2
import gradio as gr
import AnimeGANv3_src

class AnimeGANv3:
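    """Stylizes videos frame by frame using the AnimeGANv3_src converter."""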
    def __init__(self):
        # Ensure the output directory exists
        os.makedirs('output', exist_ok=True)

    def process_frame(self, frame, style_code, det_face):
        """Process a single frame with AnimeGANv3."""
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output = AnimeGANv3_src.Convert(frame_rgb, style_code, det_face)
        return output[:, :, ::-1]  # Convert back to BGR for OpenCV

    def inference(self, video_path, style, if_face=None):
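        """Convert the video at video_path to the chosen style.

        Returns the path of the rendered MP4, or None if processing fails.
        """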
        print(f"Request: video={video_path}, style={style}, extract_face={if_face}")
        try:
            # Map style names to codes
            style_codes = {
                "AnimeGANv3_Arcane": "A",
                "AnimeGANv3_Trump v1.0": "T",
                "AnimeGANv3_Shinkai": "S",
                "AnimeGANv3_PortraitSketch": "P",
                "AnimeGANv3_Hayao": "H",
                "AnimeGANv3_Disney v1.0": "D",
                "AnimeGANv3_JP_face v1.0": "J",
                "AnimeGANv3_Kpop v2.0": "K",
            }
            style_code = style_codes.get(style, "U")
            det_face = if_face == "Yes"

            # Open the input video
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"Could not open video file: {video_path}")

            # Get video properties
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            print(f"Processing {frame_count} frames at {fps} FPS, {width}x{height}")

            # Set up video writer
            save_path = "output/out.mp4"
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Use 'mp4v' codec for MP4
            out = cv2.VideoWriter(save_path, fourcc, fps, (width, height))

            # Process frames
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                processed_frame = self.process_frame(frame, style_code, det_face)
                out.write(processed_frame)

            # Release resources
            cap.release()
            out.release()

            return save_path
        except Exception as error:
            print('Error:', error)
            return None

# Create an instance of the AnimeGANv3 class
anime_gan = AnimeGANv3()

# Define the Gradio interface
title = "AnimeGANv3: Video to Anime Converter"
description = r"""Upload a video to convert it into anime style using AnimeGANv3.<br>
Select a style and choose whether to optimize for faces.<br>
<a href='https://github.com/TachibanaYoshino/AnimeGANv3' target='_blank'><b>AnimeGANv3 GitHub</b></a> | 
<a href='https://www.patreon.com/Asher_Chan' target='_blank'><b>Patreon</b></a>"""

iface = gr.Interface(
    fn=anime_gan.inference,
    inputs=[
        gr.Video(label="Input Video"),
        gr.Dropdown(choices=[
            'AnimeGANv3_Hayao',
            'AnimeGANv3_Shinkai',
            'AnimeGANv3_Arcane',
            'AnimeGANv3_Trump v1.0',
            'AnimeGANv3_Disney v1.0',
            'AnimeGANv3_PortraitSketch',
            'AnimeGANv3_JP_face v1.0',
            'AnimeGANv3_Kpop v2.0',
        ], label='AnimeGANv3 Style', value='AnimeGANv3_Arcane'),
        gr.Radio(choices=["Yes", "No"], label='Extract face', value="No"),
    ],
    outputs=[
        gr.Video(label="Output Video")
    ],
    title=title,
    description=description,
    allow_flagging="never"
)

# Launch the interface when run as a script
if __name__ == "__main__":
    iface.launch()
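
# A minimal sketch of calling the converter directly, without the Gradio UI
# (the input path and style below are only illustrative placeholders):
#
#   result_path = anime_gan.inference("input.mp4", "AnimeGANv3_Hayao", if_face="No")
#   print("Stylized video written to:", result_path)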