import gradio as gr
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
import torch
import base64
from io import BytesIO


# Cache the pipeline at module level so the slow model load happens once per
# process rather than on every request.
_pipe = None


def load_sdxl_pipeline():
    global _pipe
    if _pipe is None:
        # Alternative checkpoints tried during development:
        #   DiffusionPipeline.from_pretrained("Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO")
        #   AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo")
        #   AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
        #   DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
        #       safety_checker=None, requires_safety_checker=False)
        _pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            torch_dtype=torch.float32,
        ).to("cpu")
        _pipe.load_lora_weights(
            "mann-e/Mann-E_Turbo",
            weight_name="manne_turbo.safetensors",
        )
        # Equivalent to DPM++ SDE Karras, as noted in
        # https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview
        _pipe.scheduler = DPMSolverSinglestepScheduler.from_config(
            _pipe.scheduler.config, use_karras_sigmas=True
        )
    return _pipe


# Generate an image from the user's prompt with the SDXL + Mann-E Turbo pipeline
def generate_image(prompt):
    try:
        pipe = load_sdxl_pipeline()
        # For reproducible output, a seeded generator could be passed along,
        # e.g. generator=torch.Generator().manual_seed(8)
        image = pipe(
            prompt=prompt,
            num_inference_steps=8,
            guidance_scale=4,
            width=768,
            height=768,
            clip_skip=1
        ).images[0]
        return image, None
    except Exception as e:
        return None, str(e)
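
# Possible CPU speed-up (a sketch, not wired in): diffusers' LoRA loader API
# can fuse the LoRA into the base weights, removing per-step LoRA overhead.
# Inside load_sdxl_pipeline(), right after load_lora_weights(), one could call:
#
#   _pipe.fuse_lora()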


def inference(prompt):
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error

    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
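
# Minimal sketch of turning inference()'s return value back into an image
# (uses Pillow, which diffusers already depends on; the prompt and output
# path are arbitrary examples):
#
#   from PIL import Image
#   img_str = inference("a cat in a bustling middle eastern city")
#   if not img_str.startswith("Error:"):
#       Image.open(BytesIO(base64.b64decode(img_str))).save("out.png")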


gradio_interface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # output is plain text: the base64-encoded PNG, or an error message
)

if __name__ == "__main__":
    gradio_interface.launch()
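
# Minimal sketch of calling the running app remotely with gradio_client
# (http://127.0.0.1:7860 is Gradio's default local address, and "/predict"
# is the default api_name for a single-function gr.Interface):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   img_str = client.predict("a cat in a bustling middle eastern city", api_name="/predict")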