1inkusFace committed
Commit dfe5f15 · verified · 1 Parent(s): 14f5ced

Update app.py

Files changed (1): app.py +8 -5
app.py CHANGED
@@ -243,6 +243,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
     upload_to_ftp(filename)
 
+@torch.no_grad()
 def captioning(img):
     prompts_array = [
         # "Adjectives describing this scene are:",
@@ -264,7 +265,7 @@ def captioning(img):
         **inputsa,
         do_sample=False,
         num_beams=5,
-        max_length=96,
+        max_length=512,
         #min_length=1,
         top_p=0.9,
         repetition_penalty=1.5,
@@ -278,17 +279,19 @@ def captioning(img):
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
+
        generated_ids = model5.generate(
            **inputs,
            do_sample=False,
            num_beams=5,
-           max_length=128,
-           min_length=42,
+           max_length=256,
+           #min_length=42,
            top_p=0.9,
            repetition_penalty=1.5,
            length_penalty=1.0,
            temperature=1,
        )
+
        generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
        output_prompt.append(response_text)
@@ -296,7 +299,7 @@ def captioning(img):
 
     # Continue conversation:
     inputf = processor5(images=img, text=generated_text + 'So therefore', return_tensors="pt").to('cuda')
-    generated_ids = model5.generate(**inputf, max_length=192)
+    generated_ids = model5.generate(**inputf, max_length=768)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     response_text = generated_text.replace(generated_text, "").strip()
     print(response_text)
@@ -434,7 +437,7 @@ def generate_30(
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filename= f'rv_IPb_{timestamp}.png'
        print("-- using image file --")
-       captions =caption.append(prompt)
+       captions = caption.append(flatten_and_stringify(prompt))
        captions = flatten_and_stringify(captions)
        captions = " ".join(captions)
        print(captions)
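
For readers skimming the diff, a minimal self-contained sketch of the post-commit captioning path. It assumes processor5 and model5 are a BLIP-2 processor/model pair from transformers loaded elsewhere in app.py; the checkpoint name, the caption_one wrapper, and the float16 cast are placeholders for illustration, not the app's actual setup. The two changes this commit makes are visible here: @torch.no_grad() wraps generation so beam search keeps no autograd state, and the larger max_length gives the beams room for longer captions.

# Sketch only: the real app.py loads its own captioner; this checkpoint is a placeholder.
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor5 = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model5 = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
).to("cuda")

@torch.no_grad()  # added by this commit: no autograd graph is built during beam search
def caption_one(img: Image.Image, prompt: str) -> str:
    inputs = processor5(images=img, text=prompt, return_tensors="pt").to("cuda", torch.float16)
    generated_ids = model5.generate(
        **inputs,
        do_sample=False,        # beam decoding; top_p/temperature from app.py are inert here
        num_beams=5,
        max_length=256,         # raised from 128 by this commit
        repetition_penalty=1.5,
        length_penalty=1.0,
    )
    text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    return text.replace(prompt, "").strip()  # drop the echoed prompt, as captioning() does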
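
The last hunk folds the user's prompt into the running caption list before everything is flattened and joined into one string. A hedged sketch of that intent follows; flatten_and_stringify is defined elsewhere in app.py, so the stand-in below only mirrors the behaviour the surrounding lines rely on, and the example data is invented. Note that list.append mutates in place and returns None, which is why the flattened copy is taken from the list itself here.

# Stand-in for app.py's flatten_and_stringify (the real helper is defined elsewhere).
def flatten_and_stringify(data):
    # Flatten nested lists/tuples and coerce every element to str.
    if isinstance(data, (list, tuple)):
        return [piece for item in data for piece in flatten_and_stringify(item)]
    return [str(data)]

caption = ["a stone castle", ["on a hill", "at dusk"]]  # accumulated captions (example data)
prompt = "cinematic lighting"                           # the user's text prompt

caption.append(flatten_and_stringify(prompt))  # append mutates `caption`; its return value is None
captions = flatten_and_stringify(caption)      # so flatten the list itself
captions = " ".join(captions)
print(captions)  # a stone castle on a hill at dusk cinematic lighting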