Spaces:
Build error
Upload 3 files (#8)
Upload 3 files (b5ef021ecf5f1ea021b86966f96c777b8ca6430e)
Co-authored-by: John Smith <[email protected]>
- README.md +12 -12
- app.py +219 -158
- externalmod.py +105 -24
README.md
CHANGED
@@ -1,13 +1,13 @@
----
-title: Huggingface Diffusion
-emoji: 🛕🛕
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 4.
-app_file: app.py
-pinned: true
-short_description: Compare 909+ AI Art Models 6 at a time!
----
-
+---
+title: Huggingface Diffusion
+emoji: 🛕🛕
+colorFrom: green
+colorTo: blue
+sdk: gradio
+sdk_version: 4.42.0
+app_file: app.py
+pinned: true
+short_description: Compare 909+ AI Art Models 6 at a time!
+---
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
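
The substantive change above is the `sdk_version` pin to Gradio 4.42.0, which selects the runtime the Space is built with — worth checking first given the "Build error" status this page shows. When reproducing the Space locally, a quick sanity check (assuming the Space's requirements are installed) might look like:

    import gradio

    # The README pins sdk_version: 4.42.0; a mismatch here hints that local
    # behavior may diverge from the hosted build.
    print(gradio.__version__)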
app.py
CHANGED
@@ -1,158 +1,219 @@
-import gradio as gr
-from
-from
-
-import
-from threading import RLock
-lock = RLock()
-[…the remaining 151 removed lines of the old app.py did not survive extraction of this page]
+import gradio as gr
+from all_models import models
+from externalmod import gr_Interface_load, save_image, randomize_seed
+import asyncio
+import os
+from threading import RLock
+lock = RLock()
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
+
+
+def load_fn(models):
+    global models_load
+    models_load = {}
+    for model in models:
+        if model not in models_load.keys():
+            try:
+                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
+            except Exception as error:
+                print(error)
+                m = gr.Interface(lambda: None, ['text'], ['image'])
+            models_load.update({model: m})
+
+
+load_fn(models)
+
+
+num_models = 6
+max_images = 6
+inference_timeout = 300
+default_models = models[:num_models]
+MAX_SEED = 2**32-1
+
+
+def extend_choices(choices):
+    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
+
+
+def update_imgbox(choices):
+    choices_plus = extend_choices(choices[:num_models])
+    return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
+
+
+def random_choices():
+    import random
+    random.seed()
+    return random.choices(models, k=num_models)
+
+
+# https://huggingface.co/docs/api-inference/detailed_parameters
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
+async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
+    kwargs = {}
+    if height > 0: kwargs["height"] = height
+    if width > 0: kwargs["width"] = width
+    if steps > 0: kwargs["num_inference_steps"] = steps
+    if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+    if seed == -1: kwargs["seed"] = randomize_seed()
+    else: kwargs["seed"] = seed
+    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                               prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
+    await asyncio.sleep(0)
+    try:
+        result = await asyncio.wait_for(task, timeout=timeout)
+    except asyncio.TimeoutError as e:
+        print(e)
+        print(f"Task timed out: {model_str}")
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(f"Task timed out: {model_str}") from e
+    except Exception as e:
+        print(e)
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception() from e
+    if task.done() and result is not None and not isinstance(result, tuple):
+        with lock:
+            png_path = "image.png"
+            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
+        return image
+    return None
+
+
+def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
+    try:
+        loop = asyncio.new_event_loop()
+        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
+                                         height, width, steps, cfg, seed, inference_timeout))
+    except (Exception, asyncio.CancelledError) as e:
+        print(e)
+        print(f"Task aborted: {model_str}")
+        result = None
+        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
+    finally:
+        loop.close()
+    return result
+
+
+def add_gallery(image, model_str, gallery):
+    if gallery is None: gallery = []
+    with lock:
+        if image is not None: gallery.insert(0, (image, model_str))
+    return gallery
+
+
+CSS="""
+.gradio-container { max-width: 1200px; margin: 0 auto; !important; }
+.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
+.gallery { min_width=512px; min_height=512px; max_height=1024px; !important; }
+.guide { text-align: center; !important; }
+"""
+
+
+with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
+    gr.HTML(
+    """
+    <div>
+    <p> <center>For simultaneous generations without hidden queue check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>! For more options like single model x6 check out <a href="https://huggingface.co/spaces/John6666/Diffusion80XX4sg">Diffusion80XX4sg</a> by John6666!</center>
+    </p></div>
+    """
+    )
+    with gr.Tab('Huggingface Diffusion'):
+        with gr.Column(scale=2):
+            with gr.Group():
+                txt_input = gr.Textbox(label='Your prompt:', lines=4)
+                neg_input = gr.Textbox(label='Negative prompt:', lines=1)
+                with gr.Accordion("Advanced", open=False, visible=True):
+                    with gr.Row():
+                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                    with gr.Row():
+                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                        seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
+                        seed_rand.click(randomize_seed, None, [seed], queue=False)
+            with gr.Row():
+                gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', variant='primary', scale=3)
+                random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
+                #stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
+                #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
+            gr.Markdown("Scroll down to see more images and select models.", elem_classes="guide")
+
+        with gr.Column(scale=1):
+            with gr.Group():
+                with gr.Row():
+                    output = [gr.Image(label=m, show_download_button=True, elem_classes="output",
+                              interactive=False, min_width=80, show_share_button=False, format="png",
+                              visible=True) for m in default_models]
+                current_models = [gr.Textbox(m, visible=False) for m in default_models]
+
+        with gr.Column(scale=2):
+            gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
+                      interactive=False, show_share_button=True, container=True, format="png",
+                      preview=True, object_fit="cover", columns=2, rows=2)
+
+        for m, o in zip(current_models, output):
+            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
+                        inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
+                        concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+            o.change(add_gallery, [o, m, gallery], [gallery])
+            #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
+
+        with gr.Column(scale=4):
+            with gr.Accordion('Model selection'):
+                model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
+                model_choice.change(update_imgbox, model_choice, output)
+                model_choice.change(extend_choices, model_choice, current_models)
+                random_button.click(random_choices, None, model_choice)
+
+    with gr.Tab('Single model'):
+        with gr.Column(scale=2):
+            model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
+            with gr.Group():
+                txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
+                neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
+                with gr.Accordion("Advanced", open=False, visible=True):
+                    with gr.Row():
+                        width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                        height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                    with gr.Row():
+                        steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                        cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                        seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                        seed_rand2 = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
+                        seed_rand2.click(randomize_seed, None, [seed2], queue=False)
+                num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
+            with gr.Row():
+                gen_button2 = gr.Button('Generate', variant='primary', scale=2)
+                #stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
+                #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
+
+        with gr.Column(scale=1):
+            with gr.Group():
+                with gr.Row():
+                    output2 = [gr.Image(label='', show_download_button=True, elem_classes="output",
+                               interactive=False, min_width=80, visible=True, format="png",
+                               show_share_button=False, show_label=False) for _ in range(max_images)]
+
+        with gr.Column(scale=2):
+            gallery2 = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
+                       interactive=False, show_share_button=True, container=True, format="png",
+                       preview=True, object_fit="cover", columns=2, rows=2)
+
+        for i, o in enumerate(output2):
+            img_i = gr.Number(i, visible=False)
+            num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, queue=False)
+            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
+                         fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
+                         inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
+                                 height2, width2, steps2, cfg2, seed2], outputs=[o],
+                         concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+            o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
+            #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
+
+    gr.Markdown("Based on the [TestGen](https://huggingface.co/spaces/derwahnsinn/TestGen) Space by derwahnsinn, the [SpacIO](https://huggingface.co/spaces/RdnUser77/SpacIO_v1) Space by RdnUser77 and Omnibus's Maximum Multiplier!")
+
+demo.queue(default_concurrency_limit=200, max_size=200)
+demo.launch(show_api=False, max_threads=400)
+# https://github.com/gradio-app/gradio/issues/6339
externalmod.py
CHANGED
@@ -9,7 +9,7 @@ import re
 import tempfile
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Callable, Literal
 
 import httpx
 import huggingface_hub
@@ -33,11 +33,15 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
+server_timeout = 600
+
+
 @document()
 def load(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -48,7 +52,7 @@ def load(
     Parameters:
         name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
         src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
-        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide
+        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
         alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
     Returns:
         a Gradio Blocks object for the given model
@@ -65,7 +69,7 @@ def load(
 def load_blocks_from_repo(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -89,7 +93,7 @@ def load_blocks_from_repo(
     if src.lower() not in factory_methods:
         raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
 
-    if hf_token is not None:
+    if hf_token is not None and hf_token is not False:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
@@ -100,12 +104,16 @@ def load_blocks_from_repo(
     return blocks
 
 
-def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
+def from_model(
+    model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
+):
     model_url = f"https://huggingface.co/{model_name}"
     api_url = f"https://api-inference.huggingface.co/models/{model_name}"
     print(f"Fetching model from: {model_url}")
 
-    headers =
+    headers = (
+        {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
+    )
     response = httpx.request("GET", api_url, headers=headers)
     if response.status_code != 200:
         raise ModelNotFoundError(
@@ -115,7 +123,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
 
     headers["X-Wait-For-Model"] = "true"
     client = huggingface_hub.InferenceClient(
-        model=model_name, headers=headers, token=hf_token,
+        model=model_name, headers=headers, token=hf_token, timeout=server_timeout,
     )
 
     # For tasks that are not yet supported by the InferenceClient
@@ -365,10 +373,14 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
     else:
         raise ValueError(f"Unsupported pipeline type: {p}")
 
-    def query_huggingface_inference_endpoints(*data):
+    def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data)  # type: ignore
+        try:
+            data = fn(*data, **kwargs) # type: ignore
+        except huggingface_hub.utils.HfHubHTTPError as e:
+            if "429" in str(e):
+                raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
         return data
@@ -380,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
         "inputs": inputs,
         "outputs": outputs,
         "title": model_name,
-        "examples": examples,
+        #"examples": examples,
     }
 
     kwargs = dict(interface_info, **kwargs)
@@ -391,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
 def from_spaces(
     space_name: str, hf_token: str | None, alias: str | None, **kwargs
 ) -> Blocks:
-    client = Client(
-        space_name,
-        hf_token=hf_token,
-        download_files=False,
-        _skip_components=False,
-    )
-
     space_url = f"https://huggingface.co/spaces/{space_name}"
 
     print(f"Fetching Space from: {space_url}")
 
     headers = {}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     iframe_url = (
@@ -440,8 +445,7 @@ def from_spaces(
         "Blocks or Interface locally. You may find this Guide helpful: "
         "https://gradio.app/using_blocks_like_functions/"
     )
-
-    return from_spaces_blocks(space=space_name, hf_token=hf_token)
+    return from_spaces_blocks(space=space_name, hf_token=hf_token)
 
 
 def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
@@ -486,7 +490,7 @@ def from_spaces_interface(
     config = external_utils.streamline_spaces_interface(config)
     api_url = f"{iframe_url}/api/predict/"
     headers = {"Content-Type": "application/json"}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     # The function should call the API with preprocessed data
@@ -526,6 +530,83 @@ def gr_Interface_load(
     src: str | None = None,
     hf_token: str | None = None,
     alias: str | None = None,
-    **kwargs,
+    **kwargs, # ignore
 ) -> Blocks:
-    return load_blocks_from_repo(name, src, hf_token, alias)
+    try:
+        return load_blocks_from_repo(name, src, hf_token, alias)
+    except Exception as e:
+        print(e)
+        return gradio.Interface(lambda: None, ['text'], ['image'])
+
+
+def list_uniq(l):
+    return sorted(set(l), key=l.index)
+
+
+def get_status(model_name: str):
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
+    return client.get_model_status(model_name)
+
+
+def is_loadable(model_name: str, force_gpu: bool = False):
+    try:
+        status = get_status(model_name)
+    except Exception as e:
+        print(e)
+        print(f"Couldn't load {model_name}.")
+        return False
+    gpu_state = isinstance(status.compute_type, dict) and "gpu" in status.compute_type.keys()
+    if status is None or status.state not in ["Loadable", "Loaded"] or (force_gpu and not gpu_state):
+        print(f"Couldn't load {model_name}. Model state:'{status.state}', GPU:{gpu_state}")
+    return status is not None and status.state in ["Loadable", "Loaded"] and (not force_gpu or gpu_state)
+
+
+def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
+    from huggingface_hub import HfApi
+    api = HfApi(token=HF_TOKEN)
+    default_tags = ["diffusers"]
+    if not sort: sort = "last_modified"
+    limit = limit * 20 if check_status and force_gpu else limit * 5
+    models = []
+    try:
+        model_infos = api.list_models(author=author, #task="text-to-image",
+                                      tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
+    except Exception as e:
+        print(f"Error: Failed to list models.")
+        print(e)
+        return models
+    for model in model_infos:
+        if not model.private and not model.gated or HF_TOKEN is not None:
+            loadable = is_loadable(model.id, force_gpu) if check_status else True
+            if not_tag and not_tag in model.tags or not loadable: continue
+            models.append(model.id)
+            if len(models) == limit: break
+    return models
+
+
+def save_image(image, savefile, modelname, prompt, nprompt, height=0, width=0, steps=0, cfg=0, seed=-1):
+    from PIL import Image, PngImagePlugin
+    import json
+    try:
+        metadata = {"prompt": prompt, "negative_prompt": nprompt, "Model": {"Model": modelname.split("/")[-1]}}
+        if steps > 0: metadata["num_inference_steps"] = steps
+        if cfg > 0: metadata["guidance_scale"] = cfg
+        if seed != -1: metadata["seed"] = seed
+        if width > 0 and height > 0: metadata["resolution"] = f"{width} x {height}"
+        metadata_str = json.dumps(metadata)
+        info = PngImagePlugin.PngInfo()
+        info.add_text("metadata", metadata_str)
+        image.save(savefile, "PNG", pnginfo=info)
+        return str(Path(savefile).resolve())
+    except Exception as e:
+        print(f"Failed to save image file: {e}")
+        raise Exception(f"Failed to save image file:") from e
+
+
+def randomize_seed():
+    from random import seed, randint
+    MAX_SEED = 2**32-1
+    seed()
+    rseed = randint(0, MAX_SEED)
+    return rseed
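
The new `save_image()` above embeds the generation parameters as a JSON string in a PNG text chunk keyed `"metadata"`, so they can be recovered from the saved file later. A small sketch of reading them back with Pillow (`read_generation_metadata` is a hypothetical helper, not part of the Space):

    import json
    from PIL import Image

    def read_generation_metadata(path: str) -> dict:
        # Pillow exposes PNG tEXt/iTXt chunks via the .text mapping on loaded PNGs;
        # save_image() stores its JSON payload under the "metadata" key.
        with Image.open(path) as im:
            return json.loads(im.text.get("metadata", "{}"))

    # e.g. read_generation_metadata("image.png")
    # -> {"prompt": "...", "negative_prompt": "...", "Model": {...}, "seed": 12345}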
|