1inkusFace committed
Commit 8a63394 · verified · 1 Parent(s): 690a432

Update app.py

Files changed (1): app.py (+1 / -10)
app.py CHANGED
@@ -227,13 +227,9 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
         f.write(f"SPACE SETUP: \n")
-        f.write(f"Use Model Dtype: no \n")
-        f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
-        f.write(f"Model VAE: sdxl-vae to bfloat safetensor=false before cuda then attn_proc / scale factor 8 \n")
         f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
     upload_to_ftp(filename)
 
-
 def captioning(img):
     prompts_array = [
         "Adjectives describing this scene are:",
@@ -246,15 +242,12 @@ def captioning(img):
         "The setting of this scene must be located",
         # Add more prompts here
     ]
-
     output_prompt=[]
-
     # Initial caption generation without a prompt:
     inputsa = processor5(images=img, return_tensors="pt").to('cuda')
     generated_ids = model5.generate(**inputsa, min_length=42, max_length=42)
     generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
     print(generated_text)
-
     # Loop through prompts array:
     for prompt in prompts_array:
         inputs = processor5(images=img, text=prompt, return_tensors="pt").to('cuda')
@@ -263,7 +256,6 @@ def captioning(img):
         response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
         output_prompt.append(response_text)
         print(f"{response_text}\n") # Print only the response text
-
     # Continue conversation:
     inputf = processor5(images=img, text=generated_text + 'So therefore', return_tensors="pt").to('cuda')
     generated_ids = model5.generate(**inputf, max_length=42)
@@ -274,7 +266,6 @@ def captioning(img):
     print(output_prompt)
     return output_prompt
 
-
 def expand_prompt(prompt):
     system_prompt_rewrite = (
         "You are an AI assistant that rewrites image prompts to be more descriptive and detailed."
@@ -326,7 +317,7 @@ def expand_prompt(prompt):
     print('-- filtered prompt 2 --')
     print(enhanced_prompt_2)
     enh_prompt=[enhanced_prompt,enhanced_prompt_2]
-    return enh_prompt
+    return enh_prompt
 
 @spaces.GPU(duration=40)
 def generate_30(
 
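For context, the captioning() function touched by this diff follows the standard prompted-captioning pattern from the transformers library: a processor prepares image (and optional text-prefix) tensors, a conditional-generation model decodes a continuation, and the echoed prefix is stripped from the result. Below is a minimal standalone sketch of that pattern; processor5 and model5 are instantiated elsewhere in app.py, so the BLIP checkpoint named here is only an assumption chosen to make the sketch self-contained, not necessarily the one this Space loads.

# Minimal sketch of the prompted-captioning pattern used by captioning() in app.py.
# Assumption: a public BLIP checkpoint stands in for the Space's processor5/model5.
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)

def caption(img: Image.Image, prompts: list[str]) -> list[str]:
    outputs = []
    # Unconditional caption first (no text prefix), mirroring the diffed code.
    inputs = processor(images=img, return_tensors="pt").to(device)
    ids = model.generate(**inputs, min_length=42, max_length=42)
    print(processor.batch_decode(ids, skip_special_tokens=True)[0].strip())
    # One prompted pass per prefix; strip the echoed prefix from the decoded text.
    for prompt in prompts:
        inputs = processor(images=img, text=prompt, return_tensors="pt").to(device)
        ids = model.generate(**inputs, max_length=42)
        text = processor.batch_decode(ids, skip_special_tokens=True)[0].strip()
        outputs.append(text.replace(prompt, "").strip())
    return outputs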