ford442 committed on
Commit
fc02b93
·
verified ·
1 Parent(s): f35cb4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -104,6 +104,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
104
  if not negative:
105
  negative = ""
106
  return p.replace("{prompt}", positive), n + negative
 
107
  def load_and_prepare_model(model_id):
108
  model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
109
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to float32 if not found
@@ -210,25 +211,22 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
210
  seed = random.randint(0, MAX_SEED)
211
  return seed
212
 
213
- def uploadNote():
214
- # write note txt
215
- filename= f'tst_B_{seed}.txt'
216
- timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
217
  with open(filename, "w") as f:
218
- f.write(f"Realvis 5.0 (Tester B): {seed} png\n")
219
  f.write(f"Date/time: {timestamp} \n")
220
  f.write(f"Prompt: {prompt} \n")
221
  f.write(f"Steps: {num_inference_steps} \n")
222
  f.write(f"Guidance Scale: {guidance_scale} \n")
223
  f.write(f"SPACE SETUP: \n")
224
  f.write(f"Use Model Dtype: no \n")
225
- f.write(f"Model Scheduler: Euler_a all_custom before cuda woth trailing \n")
226
- f.write(f"Model VAE: sdxl-vae to bfloat with cuda then attn_proc \n")
227
- f.write(f"Model UNET: default to bfloat with cuda \n")
228
  f.write(f"Model HiDiffusion OFF \n")
229
- f.write(f"Model do_resize OFF \n")
230
- f.write(f"Model vae_scale_factor 8 \n")
231
- f.write(f"now added packages.txt \n")
232
  upload_to_ftp(filename)
233
 
234
  @spaces.GPU(duration=30)
@@ -271,7 +269,8 @@ def generate_30(
271
  options["use_resolution_binning"] = True
272
  images = []
273
  pipe.scheduler.set_timesteps(num_inference_steps,device)
274
- uploadNote()
 
275
  for i in range(0, num_images, BATCH_SIZE):
276
  batch_options = options.copy()
277
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -326,7 +325,8 @@ def generate_60(
326
  options["use_resolution_binning"] = True
327
  images = []
328
  pipe.scheduler.set_timesteps(num_inference_steps,device)
329
- uploadNote()
 
330
  for i in range(0, num_images, BATCH_SIZE):
331
  batch_options = options.copy()
332
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -381,7 +381,8 @@ def generate_90(
381
  options["use_resolution_binning"] = True
382
  images = []
383
  pipe.scheduler.set_timesteps(num_inference_steps,device)
384
- uploadNote()
 
385
  for i in range(0, num_images, BATCH_SIZE):
386
  batch_options = options.copy()
387
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
 
104
  if not negative:
105
  negative = ""
106
  return p.replace("{prompt}", positive), n + negative
107
+
108
  def load_and_prepare_model(model_id):
109
  model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
110
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to float32 if not found
 
211
  seed = random.randint(0, MAX_SEED)
212
  return seed
213
 
214
+ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
215
+ filename= f'tst_A_{timestamp}.txt'
 
 
216
  with open(filename, "w") as f:
217
+ f.write(f"Realvis 5.0 (Tester A) \n")
218
  f.write(f"Date/time: {timestamp} \n")
219
  f.write(f"Prompt: {prompt} \n")
220
  f.write(f"Steps: {num_inference_steps} \n")
221
  f.write(f"Guidance Scale: {guidance_scale} \n")
222
  f.write(f"SPACE SETUP: \n")
223
  f.write(f"Use Model Dtype: no \n")
224
+ f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
225
+ f.write(f"Model VAE: sdxl-vae to bfloat safetensor=false before cuda then attn_proc / scale factor 8 \n")
226
+ f.write(f"Model UNET: default ford442/RealVisXL_V5.0_BF16 \n")
227
  f.write(f"Model HiDiffusion OFF \n")
228
+ f.write(f"Model do_resize ON \n")
229
+ f.write(f"added torch to prereq and changed accellerate \n")
 
230
  upload_to_ftp(filename)
231
 
232
  @spaces.GPU(duration=30)
 
269
  options["use_resolution_binning"] = True
270
  images = []
271
  pipe.scheduler.set_timesteps(num_inference_steps,device)
272
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
273
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
274
  for i in range(0, num_images, BATCH_SIZE):
275
  batch_options = options.copy()
276
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
 
325
  options["use_resolution_binning"] = True
326
  images = []
327
  pipe.scheduler.set_timesteps(num_inference_steps,device)
328
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
329
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
330
  for i in range(0, num_images, BATCH_SIZE):
331
  batch_options = options.copy()
332
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
 
381
  options["use_resolution_binning"] = True
382
  images = []
383
  pipe.scheduler.set_timesteps(num_inference_steps,device)
384
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
385
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
386
  for i in range(0, num_images, BATCH_SIZE):
387
  batch_options = options.copy()
388
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]