import os
import random
import subprocess

import gradio as gr
import torch

if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")

# Fetch the finetuned skin-generator project and work from inside it.
subprocess.run(
    ["git", "clone", "https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator.git"],
    check=True,
)
os.chdir("Stable_Diffusion_Finetuned_Minecraft_Skin_Generator")
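
# SuperPrompt-v1 (linked in the interface description below) is a T5-based
# prompt expander, so the seq2seq classes are assumed when loading it here.
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("roborovski/superprompt-v1")
model = T5ForConditionalGeneration.from_pretrained("roborovski/superprompt-v1").to(device)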


def generate(
    system_prompt,
    prompt,
    max_new_tokens,
    repetition_penalty,
    temperature,
    top_p,
    top_k,
    seed,
):
    """Expand a short prompt into a more detailed one with SuperPrompt-v1."""
    input_text = f"{system_prompt}, {prompt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

    # Seed 0 requests a random seed; any other value keeps the run reproducible.
    if seed == 0:
        seed = random.randint(1, 100000)
    torch.manual_seed(seed)

    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )

    # Strip T5's padding and end-of-sequence markers from the decoded text.
    better_prompt = tokenizer.decode(outputs[0])
    better_prompt = better_prompt.replace("<pad>", "").replace("</s>", "")
    return better_prompt
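
# A quick local sanity check; the system prompt is the prefix suggested on the
# SuperPrompt-v1 model card, and the sampling values are illustrative only:
# print(generate("Expand the following prompt to add more detail:",
#                "A man in a purple suit wearing a tophat.", 512, 1.2, 0.5, 1.0, 50, 42))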


prompt = gr.Textbox(label="Prompt", interactive=True)

# Stable Diffusion settings (presumably consumed by the cloned skin-generator
# project); they are not parameters of generate() above.
stable_diffusion_model = gr.Dropdown(["2", "xl"], interactive=True, label="Stable Diffusion Model", value="xl", type="value", info="Choose which Stable Diffusion Model to use, xl understands prompts better")

num_inference_steps = gr.Number(value=50, minimum=1, precision=0, interactive=True, label="Inference Steps", info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference")

guidance_scale = gr.Number(value=7.5, minimum=0.1, interactive=True, label="Guidance Scale", info="How closely the generated image adheres to the prompt")

num_images_per_prompt = gr.Number(value=1, minimum=1, precision=0, interactive=True, label="Images Per Prompt", info="The number of images to make with the prompt")

model_precision_type = gr.Dropdown(["fp16", "fp32"], value="fp16", interactive=True, label="Model Precision Type", info="The precision type to load the model, like fp16 which is faster, or fp32 which gives better results")

seed = gr.Number(value=42, precision=0, interactive=True, label="Seed", info="A starting point to initiate the generation process, put 0 for a random one")
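
# Controls for generate()'s remaining parameters; the defaults below are
# illustrative assumptions, not values taken from the original project.
system_prompt = gr.Textbox(label="System Prompt", interactive=True, info="The instruction prepended to your prompt, telling the model how to rewrite it")
max_new_tokens = gr.Number(value=512, minimum=1, precision=0, interactive=True, label="Max New Tokens", info="The maximum number of tokens to generate for the expanded prompt")
repetition_penalty = gr.Number(value=1.2, minimum=0.1, interactive=True, label="Repetition Penalty", info="Values above 1.0 discourage the model from repeating itself")
temperature = gr.Number(value=0.5, minimum=0.1, interactive=True, label="Temperature", info="Higher values make the output more varied, lower values more deterministic")
top_p = gr.Number(value=1.0, minimum=0.1, maximum=1.0, interactive=True, label="Top P", info="Sample only from the smallest set of tokens whose cumulative probability reaches this value")
top_k = gr.Number(value=50, minimum=1, precision=0, interactive=True, label="Top K", info="Sample only from the k most likely next tokens")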

# One example row per input, in the same order as the `inputs` list below.
examples = [
    [
        "Expand the following prompt to add more detail:",
        "A man in a purple suit wearing a tophat.",
        512,
        1.2,
        0.5,
        1.0,
        50,
        42,
    ]
]

gr.Interface(
    fn=generate,
    inputs=[system_prompt, prompt, max_new_tokens, repetition_penalty, temperature, top_p, top_k, seed],
    outputs=gr.Textbox(label="Better Prompt"),
    title="Stable Diffusion Finetuned Minecraft Skin Generator",
    description="Make your prompts more detailed!<br>Model used: https://huggingface.co/roborovski/superprompt-v1<br>Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
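
# On Hugging Face Spaces this file is conventionally saved as app.py and started
# automatically; locally, running `python app.py` serves the interface.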