Update app.py
app.py
CHANGED
@@ -124,7 +124,7 @@ def load_and_prepare_model(model_id):
 # Preload and compile both models
 models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}

-MAX_SEED = np.iinfo(np.
+MAX_SEED = np.iinfo(np.int32).max

 def upload_to_ftp(filename):
     try:
@@ -148,8 +148,22 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     return seed
+

-
+
+GPU_DURATION_OPTIONS = {
+    "Short (45s)": 45,
+    "Short (60s)": 60,
+    "Medium (80s)": 80,
+    "Medium (100s)": 100,
+    "Long (120s)": 120,
+    "Long (140s)": 140,
+}
+
+def set_gpu_duration(duration_choice):
+    os.environ["GPU_DURATION"] = str(GPU_DURATION_OPTIONS[duration_choice])
+
+@spaces.GPU(duration=int(os.getenv("GPU_DURATION", "80"))) # Dynamic duration
 def generate(
     model_choice: str,
     prompt: str,
@@ -169,7 +183,7 @@ def generate(
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='
+    generator = torch.Generator(device='cpu').manual_seed(seed)

     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)

@@ -188,7 +202,8 @@ def generate(
     options["use_resolution_binning"] = True

     images = []
-
+    with torch.no_grad():
+        for i in range(0, num_images, BATCH_SIZE):
             batch_options = options.copy()
             batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
             if "negative_prompt" in batch_options: