Spaces:
Runtime error
Runtime error
Commit
·
07d5f81
1
Parent(s):
fbde8d1
Update app.py
Browse files
GPT-4: It seems like your app is exceeding the available memory limit of 46 GB. Because many models are being loaded, one possible solution is to load models on demand when required instead of loading all of them up front. You can do this by keeping the `models` list, modifying the `__init__` method of the `Model` class so it no longer loads pipelines eagerly, and adding a `get_model` helper that loads a model only when it is specifically requested.
app.py
CHANGED
@@ -22,18 +22,8 @@ class Model:
|
|
22 |
def __init__(self, name, path=""):
|
23 |
self.name = name
|
24 |
self.path = path
|
25 |
-
|
26 |
-
|
27 |
-
self.pipe_t2i = StableDiffusionPipeline.from_pretrained(
|
28 |
-
path, torch_dtype=torch.float16, safety_checker=SAFETY_CHECKER
|
29 |
-
)
|
30 |
-
self.pipe_t2i.scheduler = DPMSolverMultistepScheduler.from_config(
|
31 |
-
self.pipe_t2i.scheduler.config
|
32 |
-
)
|
33 |
-
self.pipe_i2i = StableDiffusionImg2ImgPipeline(**self.pipe_t2i.components)
|
34 |
-
else:
|
35 |
-
self.pipe_t2i = None
|
36 |
-
self.pipe_i2i = None
|
37 |
|
38 |
|
39 |
models = [
|
@@ -51,6 +41,19 @@ MODELS = {m.name: m for m in models}
|
|
51 |
|
52 |
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
|
55 |
def error_str(error, title="Error"):
|
56 |
return (
|
@@ -170,7 +173,7 @@ def img_to_img(
|
|
170 |
generator,
|
171 |
seed,
|
172 |
):
|
173 |
-
pipe =
|
174 |
|
175 |
if torch.cuda.is_available():
|
176 |
pipe = pipe.to("cuda")
|
|
|
22 |
def __init__(self, name, path=""):
|
23 |
self.name = name
|
24 |
self.path = path
|
25 |
+
self.pipe_t2i = None
|
26 |
+
self.pipe_i2i = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
|
28 |
|
29 |
models = [
|
|
|
41 |
|
42 |
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
|
43 |
|
44 |
def get_model(name):
    """Return the Model registered under *name*, loading its pipelines on demand.

    Fixes three typos in the committed version that caused the Space's
    runtime error: `moel` -> `model` (NameError), `model.pip_t2i` ->
    `model.pipe_t2i` (the cache attribute was never set, so every call
    hit `None.scheduler`), and `mode` -> `model` (NameError).

    Parameters:
        name: key into the module-level MODELS registry.

    Returns:
        The cached Model instance with `pipe_t2i` and `pipe_i2i` populated.

    Raises:
        KeyError: if *name* is not a registered model.
    """
    model = MODELS[name]

    # Lazy load: only build the pipelines the first time this model is
    # requested, so unused models never consume memory.
    if model.pipe_t2i is None:
        model.pipe_t2i = StableDiffusionPipeline.from_pretrained(
            model.path, torch_dtype=torch.float16, safety_checker=SAFETY_CHECKER
        )
        model.pipe_t2i.scheduler = DPMSolverMultistepScheduler.from_config(
            model.pipe_t2i.scheduler.config
        )
        # img2img shares the same weights/components as text2img.
        model.pipe_i2i = StableDiffusionImg2ImgPipeline(**model.pipe_t2i.components)

    return model
|
57 |
|
58 |
def error_str(error, title="Error"):
|
59 |
return (
|
|
|
173 |
generator,
|
174 |
seed,
|
175 |
):
|
176 |
+
pipe = model.pipe_i2i
|
177 |
|
178 |
if torch.cuda.is_available():
|
179 |
pipe = pipe.to("cuda")
|