import gradio as gr
import torch
import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline

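# Page-header CSS (passed to gr.Interface below via its css argument).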
css = """
  <style>
  .finetuned-diffusion-div {
      text-align: center;
      max-width: 700px;
      margin: 0 auto;
    }
    .finetuned-diffusion-div div {
      display: inline-flex;
      align-items: center;
      gap: 0.8rem;
      font-size: 1.75rem;
    }
    .finetuned-diffusion-div div h1 {
      font-weight: 900;
      margin-bottom: 7px;
    }
    .finetuned-diffusion-div p {
      margin-bottom: 10px;
      font-size: 94%;
    }
    .finetuned-diffusion-div p a {
      text-decoration: underline;
    }
  </style>

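# Run on the GPU when available; the fp16 weights below assume a CUDA device.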
device = "cuda" if torch.cuda.is_available() else "cpu"
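# Load the base text-to-image pipeline and the Stability AI x2 latent upscaler.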
pipe = DiffusionPipeline.from_pretrained("dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, safety_checker=None)
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
upscaler = upscaler.to(device)
pipe = pipe.to(device)

def genie(prompt, negative_prompt, height, width, scale, steps, seed, upscale, upscale_prompt, upscale_neg, upscale_scale, upscale_steps):
    """Generate an image, optionally passing the latents through the x2 latent upscaler."""
    generator = torch.Generator(device=device).manual_seed(int(seed))
    if upscale == "Yes":
        # Keep the base pass in latent space so the upscaler can consume it directly.
        low_res_latents = pipe(prompt, negative_prompt=negative_prompt, height=int(height), width=int(width), num_inference_steps=int(steps), guidance_scale=scale, generator=generator, output_type="latent").images
        image = upscaler(prompt=upscale_prompt, negative_prompt=upscale_neg, image=low_res_latents, num_inference_steps=int(upscale_steps), guidance_scale=upscale_scale, generator=generator).images[0]
    else:
        image = pipe(prompt, negative_prompt=negative_prompt, height=int(height), width=int(width), num_inference_steps=int(steps), guidance_scale=scale, generator=generator).images[0]
    return image
    
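# Build the Gradio UI; the order of inputs must match the genie() parameter order.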
gr.Interface(theme='HaleyCH/HaleyCH_Theme',
             css=css,
             fn=genie,
             inputs=[gr.Textbox(label='Input field right under here (Prompt)'),
                     gr.Textbox(label="What you don't want (Negative Prompt)"),
                     gr.Slider(512, 1024, 768, step=128, label='Height'),
                     gr.Slider(512, 1024, 768, step=128, label='Width'),
                     gr.Slider(1, maximum=15, value=10, step=.25, label='Guidance Scale'),
                     gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
                     gr.Slider(minimum=1, maximum=9999999999999999, step=1, randomize=True, label='Seed'),
                     gr.Radio(["Yes", "No"], label='Upscale?'),
                     gr.Textbox(label='Upscaler Prompt: Optional'),
                     gr.Textbox(label='Upscaler Negative Prompt: Both Optional And Experimental'),
                     gr.Slider(minimum=0, maximum=15, value=0, step=1, label='Upscale Guidance Scale'),
                     gr.Slider(minimum=5, maximum=25, value=5, step=5, label='Upscaler Iterations')],
             outputs=gr.Image(label='Generated Image'),
             title="Our Free Image Creator",
             description="<br><br><b>TIPS:</b><br>To get the best results, read the instructions underneath the App.<br>This free App is slow at producing images.<br>Join us and get access to this App and many more; they work faster and have more advanced features.",
             article="Online App: <a href=\"https://www.aichatbot.ai\">www.aichatbot.ai</a>").launch(debug=True)