Update app.py
app.py CHANGED
@@ -241,7 +241,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
+def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
     filename= f'tst_A_{timestamp}.txt'
     with open(filename, "w") as f:
         f.write(f"Realvis 5.0 (Tester A) \n")
@@ -249,6 +249,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Prompt: {prompt} \n")
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
+        f.write(f"Denoise Strength: {denoise} \n")
         f.write(f"SPACE SETUP: \n")
         f.write(f"Use Model Dtype: no \n")
         f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
@@ -278,6 +279,7 @@ def generate_30(
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 
 ):
+    print(f'debug: num_images: {num_images} denoise: {denoise}')
     torch.backends.cudnn.benchmark = False
     torch.cuda.empty_cache()
     gc.collect()
@@ -304,7 +306,7 @@ def generate_30(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -371,7 +373,7 @@ def generate_60(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -438,7 +440,7 @@ def generate_90(
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
+    uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
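In plain terms, this commit adds a denoise parameter to the uploadNote helper, writes its value into the per-run note file, threads denoise through the uploadNote calls in generate_30, generate_60, and generate_90, and prints num_images and denoise for debugging at the start of generate_30. As a quick reference, the sketch below shows how the patched helper reads after this change; it reproduces only the lines visible in the diff hunks above, and the example call at the end uses made-up argument values purely for illustration.

import datetime

def uploadNote(prompt, num_inference_steps, guidance_scale, timestamp, denoise):
    # Write a small sidecar text file recording the run settings.
    filename = f'tst_A_{timestamp}.txt'
    with open(filename, "w") as f:
        f.write(f"Realvis 5.0 (Tester A) \n")
        f.write(f"Prompt: {prompt} \n")
        f.write(f"Steps: {num_inference_steps} \n")
        f.write(f"Guidance Scale: {guidance_scale} \n")
        f.write(f"Denoise Strength: {denoise} \n")  # new field introduced by this commit
        f.write(f"SPACE SETUP: \n")
        f.write(f"Use Model Dtype: no \n")
        f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")

# Example call mirroring the generate_* functions; the argument values here are hypothetical.
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
uploadNote("test prompt", 30, 5.0, timestamp, 0.6)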