ford442 committed
Commit e28d6bf · verified · 1 parent: 6254fb4

Update app.py

Files changed (1):
  app.py +8 -15
app.py CHANGED
@@ -274,14 +274,10 @@ import torch
 import paramiko
 import os
 
-#FTP_HOST = os.getenv("FTP_HOST")
+FTP_HOST = os.getenv("FTP_HOST")
 FTP_USER = os.getenv("FTP_USER")
-#FTP_PASS = os.getenv("FTP_PASS")
+FTP_PASS = os.getenv("FTP_PASS")
 FTP_DIR = os.getenv("FTP_DIR")
-FTP_HOST = "1ink.us"
-#FTP_USER = "ford442"
-FTP_PASS = "GoogleBez12!"
-#FTP_DIR = "1ink.us/stable_diff/" # Remote directory on FTP server
 
 def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
     # adjust the batch_size of prompt_embeds according to guidance_scale
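This hunk removes the hardcoded FTP host and password (a real credential had leaked into the repo) and restores the os.getenv lookups, so the values now come from the Space's secrets. A minimal sketch of reading and validating that configuration at startup; the fail-fast guard is an assumption and not part of this commit:

import os

# Read the FTP settings from the environment (set as HF Space secrets).
FTP_HOST = os.getenv("FTP_HOST")
FTP_USER = os.getenv("FTP_USER")
FTP_PASS = os.getenv("FTP_PASS")
FTP_DIR = os.getenv("FTP_DIR")

# Hypothetical guard: os.getenv returns None for unset names, which would
# otherwise surface much later as a confusing paramiko connection error.
_missing = [k for k, v in {"FTP_HOST": FTP_HOST, "FTP_USER": FTP_USER,
                           "FTP_PASS": FTP_PASS, "FTP_DIR": FTP_DIR}.items() if not v]
if _missing:
    raise RuntimeError(f"Missing FTP environment variables: {', '.join(_missing)}")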
@@ -342,7 +338,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
     f.write(f"Steps: {num_inference_steps} \n")
     f.write(f"Guidance Scale: {guidance_scale} \n")
     f.write(f"SPACE SETUP: \n")
-    f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
     f.write(f"Model VAE: sdxl-vae-bf16\n")
     f.write(f"To cuda and bfloat \n")
     return filename
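For context, uploadNote writes the generation settings into a timestamped note file that is later pushed to the server; this hunk only drops the stale scheduler line. A hedged reconstruction of the helper from the visible f.write calls (the filename pattern and the prompt line are assumptions, the real ones sit above this hunk):

def uploadNote(prompt, num_inference_steps, guidance_scale, timestamp):
    # Hypothetical filename; the actual pattern is outside this diff.
    filename = f"{timestamp}.txt"
    with open(filename, "w") as f:
        f.write(f"Prompt: {prompt} \n")  # assumed; not shown in the hunk
        f.write(f"Steps: {num_inference_steps} \n")
        f.write(f"Guidance Scale: {guidance_scale} \n")
        f.write(f"SPACE SETUP: \n")
        f.write(f"Model VAE: sdxl-vae-bf16\n")
        f.write(f"To cuda and bfloat \n")
    return filename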
@@ -351,7 +346,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
 
 pyx = cyper.inline(code, fast_indexing=True, directives=dict(boundscheck=False, wraparound=False, language_level=3))
 
-@spaces.GPU(duration=30)
+@spaces.GPU(duration=35)
 def generate_30(
     prompt: str,
     negative_prompt: str = "",
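On ZeroGPU Spaces, the decorator's duration is the number of seconds the GPU is reserved for each call; raising 30 to 35 (and 60 to 65, 90 to 95 below) leaves headroom so a run that slightly overshoots its nominal budget is not cut off mid-inference. A minimal sketch of the decorator in isolation, with a placeholder body:

import spaces
import torch

@spaces.GPU(duration=35)  # reserve the GPU for up to 35 s per call
def generate(prompt: str):
    # On ZeroGPU hardware, CUDA is only guaranteed to be available
    # inside a @spaces.GPU-decorated call.
    generator = torch.Generator(device="cuda").manual_seed(0)
    ...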
@@ -362,7 +357,7 @@ def generate_30(
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
     use_resolution_binning: bool = True,
-    progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
+    progress=gr.Progress(track_tqdm=True)
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
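The progress parameter only needs to appear in the signature: Gradio injects the tracker, and with track_tqdm=True it mirrors any tqdm bars raised inside the call (including diffusers' denoising loop) into the UI, so the body never references it. The hunk just trims the redundant comment. A small standalone sketch of the same mechanism:

import time
import gradio as gr
from tqdm import tqdm

def slow_task(n: int, progress=gr.Progress(track_tqdm=True)):
    # tqdm loops inside the call are mirrored into the Gradio progress bar.
    for _ in tqdm(range(n)):
        time.sleep(0.1)
    return f"done ({n} steps)"

demo = gr.Interface(slow_task, gr.Slider(1, 50, step=1), "text")
# demo.launch()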
@@ -383,9 +378,7 @@ def generate_30(
     images = []
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename = pyx.uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
-    #upload_to_ftp(filename)
     pyx.upload_to_ftp(filename)
-    #uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
     batch_options = options.copy()
     with torch.inference_mode():
         rv_image = pipe(**batch_options).images[0]
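upload_to_ftp is now called only through pyx, the cyper-compiled module, and the dead commented-out calls are gone. The function itself is not shown in this diff; given the paramiko import in the first hunk it is presumably an SFTP upload along these lines (a hedged sketch, every name in the body assumed):

import os
import paramiko

FTP_HOST = os.getenv("FTP_HOST")  # as read in the first hunk
FTP_USER = os.getenv("FTP_USER")
FTP_PASS = os.getenv("FTP_PASS")
FTP_DIR = os.getenv("FTP_DIR")

def upload_to_ftp(filename):
    # Assumed implementation: paramiko speaks SFTP (file transfer over SSH),
    # not plain FTP, despite the FTP_* naming.
    try:
        transport = paramiko.Transport((FTP_HOST, 22))
        transport.connect(username=FTP_USER, password=FTP_PASS)
        sftp = paramiko.SFTPClient.from_transport(transport)
        remote_path = os.path.join(FTP_DIR, os.path.basename(filename))
        sftp.put(filename, remote_path)
        sftp.close()
        transport.close()
        print(f"Uploaded {filename} to {remote_path}")
    except Exception as e:
        print(f"Upload failed: {e}")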
@@ -403,7 +396,7 @@ def generate_30(
     os.symlink(sd_image_path, unique_name)
     return [unique_name]
 
-@spaces.GPU(duration=60)
+@spaces.GPU(duration=65)
 def generate_60(
     prompt: str,
     negative_prompt: str = "",
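The context lines show the return pattern shared by all three generators: the saved image is exposed to the gallery through a symlink with a unique name, so repeated runs never overwrite one another. A sketch of that idiom (the uuid-based naming is an assumption; only the symlink-and-return step is visible in the diff):

import os
import uuid

def save_image_for_gallery(sd_image_path: str) -> list[str]:
    # Hypothetical helper: give each result a collision-free name and
    # return it as a one-element list, the shape a gr.Gallery expects.
    unique_name = str(uuid.uuid4()) + ".png"
    os.symlink(sd_image_path, unique_name)
    return [unique_name]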
@@ -414,7 +407,7 @@ def generate_60(
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
     use_resolution_binning: bool = True,
-    progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
+    progress=gr.Progress(track_tqdm=True)
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -445,7 +438,7 @@ def generate_60(
     os.symlink(sd_image_path, unique_name)
     return [unique_name]
 
-@spaces.GPU(duration=90)
+@spaces.GPU(duration=95)
 def generate_90(
     prompt: str,
     negative_prompt: str = "",
@@ -456,7 +449,7 @@ def generate_90(
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
     use_resolution_binning: bool = True,
-    progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
+    progress=gr.Progress(track_tqdm=True)
 ):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
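Each generator draws a fresh random seed and pins it to a CUDA torch.Generator, so every run is random yet reproducible once its seed is logged. A minimal sketch of the idiom; MAX_SEED is defined elsewhere in app.py, and np.iinfo(np.int32).max is a common choice assumed here:

import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # assumed; the real value lives elsewhere in app.py

seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device="cuda").manual_seed(seed)
# Passing this generator to the pipeline makes the run deterministic:
# the same seed and settings reproduce the same image.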
 