import base64
import gc
from io import BytesIO

import gradio as gr
import torch
from fastapi import FastAPI
from PIL import Image

def encode_file_to_base64(file_path):
    """Read a file from disk and return its contents as Base64-encoded bytes."""
    with open(file_path, "rb") as file:
        return base64.b64encode(file.read())
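
# Counterpart sketch (not used by the endpoints below): decode a Base64
# payload, such as the "base64_encoding" field returned by the inference
# endpoint, back into a file. The helper name and signature are illustrative
# additions, not part of the upstream API.
def decode_base64_to_file(file_base64, file_path):
    with open(file_path, "wb") as file:
        file.write(base64.b64decode(file_base64))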

def update_edition_api(_: gr.Blocks, app: FastAPI, controller):
    # Registers an endpoint that switches the EasyAnimate edition (e.g. "v2").
    @app.post("/easyanimate/update_edition")
    def _update_edition_api(
        datas: dict,
    ):
        edition = datas.get('edition', 'v2')

        try:
            controller.update_edition(edition)
            comment = "Success"
        except Exception as e:
            # Free GPU memory before reporting the failure.
            torch.cuda.empty_cache()
            comment = f"Error. Error information is {str(e)}"

        return {"message": comment}

def update_diffusion_transformer_api(_: gr.Blocks, app: FastAPI, controller):
    # Registers an endpoint that loads a different diffusion transformer checkpoint.
    @app.post("/easyanimate/update_diffusion_transformer")
    def _update_diffusion_transformer_api(
        datas: dict,
    ):
        diffusion_transformer_path = datas.get('diffusion_transformer_path', 'none')

        try:
            controller.update_diffusion_transformer(diffusion_transformer_path)
            comment = "Success"
        except Exception as e:
            # Free GPU memory before reporting the failure.
            torch.cuda.empty_cache()
            comment = f"Error. Error information is {str(e)}"

        return {"message": comment}

def infer_forward_api(_: gr.Blocks, app: FastAPI, controller):
    # Registers the main generation endpoint. The request body mirrors the
    # Gradio UI controls; each field falls back to a default when omitted.
    @app.post("/easyanimate/infer_forward")
    def _infer_forward_api(
        datas: dict,
    ):
        base_model_path = datas.get('base_model_path', 'none')
        motion_module_path = datas.get('motion_module_path', 'none')
        lora_model_path = datas.get('lora_model_path', 'none')
        lora_alpha_slider = datas.get('lora_alpha_slider', 0.55)
        prompt_textbox = datas.get('prompt_textbox', None)
        negative_prompt_textbox = datas.get('negative_prompt_textbox', 'The video is not of a high quality, it has a low resolution, and the audio quality is not clear. Strange motion trajectory, a poor composition and deformed video, low resolution, duplicate and ugly, strange body structure, long and strange neck, bad teeth, bad eyes, bad limbs, bad hands, rotating camera, blurry camera, shaking camera. Deformation, low-resolution, blurry, ugly, distortion.')
        sampler_dropdown = datas.get('sampler_dropdown', 'Euler')
        sample_step_slider = datas.get('sample_step_slider', 30)
        resize_method = datas.get('resize_method', "Generate by")
        width_slider = datas.get('width_slider', 672)
        height_slider = datas.get('height_slider', 384)
        base_resolution = datas.get('base_resolution', 512)
        is_image = datas.get('is_image', False)
        generation_method = datas.get('generation_method', False)
        length_slider = datas.get('length_slider', 144)
        overlap_video_length = datas.get('overlap_video_length', 4)
        partial_video_length = datas.get('partial_video_length', 72)
        cfg_scale_slider = datas.get('cfg_scale_slider', 6)
        start_image = datas.get('start_image', None)
        end_image = datas.get('end_image', None)
        seed_textbox = datas.get("seed_textbox", 43)

        # An image request always overrides the requested generation method.
        generation_method = "Image Generation" if is_image else generation_method

        # Optional start/end frames arrive as Base64 strings; decode them into
        # single-element lists of PIL images before handing them to the controller.
        if start_image is not None:
            start_image = base64.b64decode(start_image)
            start_image = [Image.open(BytesIO(start_image))]

        if end_image is not None:
            end_image = base64.b64decode(end_image)
            end_image = [Image.open(BytesIO(end_image))]

        try:
            save_sample_path, comment = controller.generate(
                "",
                base_model_path,
                motion_module_path,
                lora_model_path,
                lora_alpha_slider,
                prompt_textbox,
                negative_prompt_textbox,
                sampler_dropdown,
                sample_step_slider,
                resize_method,
                width_slider,
                height_slider,
                base_resolution,
                generation_method,
                length_slider,
                overlap_video_length,
                partial_video_length,
                cfg_scale_slider,
                start_image,
                end_image,
                seed_textbox,
                is_api=True,
            )
        except Exception as e:
            # Release as much GPU memory as possible before returning the error.
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            save_sample_path = ""
            comment = f"Error. Error information is {str(e)}"
            return {"message": comment}
        
        # On success, return both the server-side path and a Base64 copy of the
        # generated file so remote clients do not need filesystem access.
        if save_sample_path != "":
            return {
                "message": comment,
                "save_sample_path": save_sample_path,
                "base64_encoding": encode_file_to_base64(save_sample_path),
            }
        else:
            return {"message": comment, "save_sample_path": save_sample_path}