1inkusFace committed (verified)
Commit 2620ec1 · Parent(s): 3fc3bdf

Update app.py

Files changed (1)
  1. app.py +32 -8
app.py CHANGED
@@ -243,7 +243,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
     upload_to_ftp(filename)
 
-@torch.no_grad()
 def captioning(img):
     prompts_array = [
         # "Adjectives describing this scene are:",
@@ -265,7 +264,7 @@ def captioning(img):
         **inputsa,
         do_sample=False,
         num_beams=5,
-        max_length=768,
+        max_length=256,
         min_length=64,
         top_p=0.9,
         repetition_penalty=1.5,
@@ -280,12 +279,13 @@ def captioning(img):
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
+        #with torch.no_grad():
         generated_ids = model5.generate(
             **inputs,
             do_sample=False,
             num_beams=5,
             max_length=64,
-            #min_length=42,
+            min_length=24,
             top_p=0.9,
             repetition_penalty=1.5,
             length_penalty=1.0,
@@ -297,12 +297,24 @@ def captioning(img):
         print(f"{response_text}\n")
         inputf = processor5(
             images=img,
-            text=generated_text + 'So therefore',
+            text=generated_text + ' So therefore, ',
             return_tensors="pt"
         ).to('cuda')
-        generated_ids = model5.generate(**inputf, max_length=128)
+        generated_ids = model5.generate(
+            **inputf,
+            do_sample=False,
+            num_beams=5,
+            max_length=256,
+            min_length=24,
+            top_p=0.9,
+            repetition_penalty=1.5,
+            length_penalty=1.0,
+            temperature=1,
+        )
+        )
         generated_texta = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
         response_text = generated_texta.replace(generated_text, "").strip()
+        print(f"{response_text}\n")
         output_prompt.append(response_text)
     output_prompt = " ".join(output_prompt)
     return output_prompt
@@ -437,8 +449,12 @@ def generate_30(
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IPb_{timestamp}.png'
         print("-- using image file --")
+        print("-- CURRENT PROMPT --")
+        print(prompt)
         prompt = " ".join(prompt)
-        captions = " ".join(captions)
+        print("-- CURRENT PROMPT AFTER .join --")
+        print(prompt)
+        captions = " ".join(caption)
         print(captions)
         print("-- not generating further caption --")
         global model5
@@ -562,8 +578,12 @@ def generate_60(
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IPb_{timestamp}.png'
         print("-- using image file --")
+        print("-- CURRENT PROMPT --")
+        print(prompt)
         prompt = " ".join(prompt)
-        captions = " ".join(captions)
+        print("-- CURRENT PROMPT AFTER .join --")
+        print(prompt)
+        captions = " ".join(caption)
         print(captions)
         print("-- not generating further caption --")
         global model5
@@ -687,8 +707,12 @@ def generate_90(
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filename= f'rv_IPb_{timestamp}.png'
         print("-- using image file --")
+        print("-- CURRENT PROMPT --")
+        print(prompt)
         prompt = " ".join(prompt)
-        captions = " ".join(captions)
+        print("-- CURRENT PROMPT AFTER .join --")
+        print(prompt)
+        captions = " ".join(caption)
         print(captions)
         print("-- not generating further caption --")
         global model5
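
For reference, the chained captioning flow that the @@ -297,12 +297,24 @@ hunk expands can be sketched as one standalone function. This is only a sketch: processor5 and model5 are the caption processor/model pair that app.py loads elsewhere (taken as parameters here so the snippet stands alone, and assumed to follow the usual BLIP-style transformers processor/model API), torch.no_grad() stands in for the removed @torch.no_grad() decorator and the commented-out with-block, and the duplicated closing parenthesis shown in the hunk is dropped so the code parses.

import torch

def chained_caption(processor5, model5, img, prompt):
    # First pass: caption the image conditioned on one entry of prompts_array.
    inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
    with torch.no_grad():  # stands in for the removed @torch.no_grad() decorator
        generated_ids = model5.generate(
            **inputs,
            do_sample=False,       # beam search, as in the diff
            num_beams=5,
            max_length=64,
            min_length=24,
            repetition_penalty=1.5,
            length_penalty=1.0,
            # top_p/temperature from the diff are omitted: they have no effect when do_sample=False
        )
    generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    # Second pass: feed the caption back with the ' So therefore, ' cue and let the
    # model continue it with the larger max_length used by the new generate() call.
    inputf = processor5(images=img, text=generated_text + ' So therefore, ', return_tensors="pt").to('cuda')
    with torch.no_grad():
        generated_idsf = model5.generate(
            **inputf,
            do_sample=False,
            num_beams=5,
            max_length=256,
            min_length=24,
            repetition_penalty=1.5,
            length_penalty=1.0,
        )
    generated_texta = processor5.batch_decode(generated_idsf, skip_special_tokens=True)[0].strip()
    # Keep only the continuation, as captioning() does before appending to output_prompt.
    return generated_texta.replace(generated_text, "").strip()

In captioning() this pattern runs once per prompt in prompts_array, and the stripped continuations are joined with spaces into output_prompt.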
 
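The image-file branches of generate_30, generate_60 and generate_90 all gain the same pair of debug prints around prompt = " ".join(prompt), and now join caption instead of captions. A plausible reading (an assumption; the commit message does not say) is that the prints are meant to catch the usual str.join pitfall, since joining a list merges its elements while joining a plain string inserts the separator between every character:

# Hypothetical values, only to illustrate what the new "-- CURRENT PROMPT --" prints can reveal.
prompt_as_list = ["a", "photo", "of", "a", "cat"]
prompt_as_str = "a photo of a cat"

print(" ".join(prompt_as_list))  # a photo of a cat
print(" ".join(prompt_as_str))   # a   p h o t o   o f   a   c a t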