panelforge committed
Commit 0ad6bbd · verified · 1 Parent(s): b45e3ed

Update app.py

Files changed (1):
  app.py  +6 -11
app.py CHANGED
@@ -4,8 +4,6 @@ import random
 import spaces # [uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
-import io
-from PIL import Image
 from tags import participant_tags, tribe_tags, skin_tone_tags, body_type_tags, tattoo_tags, piercing_tags, expression_tags, eye_tags, hair_style_tags, position_tags, fetish_tags, location_tags, camera_tags, atmosphere_tags
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -30,8 +28,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
           selected_camera_tags, selected_atmosphere_tags, active_tab, progress=gr.Progress(track_tqdm=True)):
 
     if active_tab == "Prompt Input":
+        # Use the user-provided prompt
         final_prompt = f'score_9, score_8_up, score_7_up, source_anime, {prompt}'
     else:
+        # Use tags from the "Tag Selection" tab
         selected_tags = (
            [participant_tags[tag] for tag in selected_participant_tags] +
            [tribe_tags[tag] for tag in selected_tribe_tags] +
@@ -51,6 +51,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
        tags_text = ', '.join(selected_tags)
        final_prompt = f'score_9, score_8_up, score_7_up, source_anime, {tags_text}'
 
+    # Concatenate user-provided negative prompt with additional restrictions
     additional_negatives = "worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome, blurry, lowres, watermark"
     full_negative_prompt = f"{additional_negatives}, {negative_prompt}"
 
@@ -59,7 +60,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
 
     generator = torch.Generator().manual_seed(seed)
 
-    # Generate the image
+    # Generate the image with the final prompts
    image = pipe(
        prompt=final_prompt,
        negative_prompt=full_negative_prompt,
@@ -70,14 +71,8 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
        generator=generator
    ).images[0]
 
-    # Convert the image to the desired format (JPEG or PNG)
-    img_byte_arr = io.BytesIO()
-    image_format = "PNG" # You can change this to "JPEG" if you prefer JPEG
-    image.save(img_byte_arr, format=image_format)
-    img_byte_arr.seek(0) # Reset pointer to the beginning of the BytesIO object
-
-    # Return the image as a file (PNG or JPEG), seed, and the used prompts
-    return img_byte_arr, seed, f"Prompt used: {final_prompt}\nNegative prompt used: {full_negative_prompt}"
+    # Return image, seed, and the used prompts
+    return image, seed, f"Prompt used: {final_prompt}\nNegative prompt used: {full_negative_prompt}"
 
 
 css = """
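
Note: with this commit, infer() returns the PIL image produced by the pipeline directly instead of a manually encoded BytesIO buffer, which is why the io and PIL.Image imports are dropped. Gradio's gr.Image output component accepts PIL images natively. The standalone sketch below illustrates that wiring; the component names (result, seed_out, info) and the stub function are assumptions for illustration and are not the app's actual UI code, which is outside this diff.

    # Standalone sketch (not app.py itself): a Gradio output of type gr.Image
    # can receive a PIL.Image directly, so no BytesIO/PNG encoding is needed.
    import gradio as gr
    from PIL import Image

    def fake_infer(prompt: str):
        # Stand-in for pipe(...).images[0], which is a PIL.Image
        image = Image.new("RGB", (256, 256), color="gray")
        seed = 42
        return image, seed, f"Prompt used: {prompt}"

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")
        run = gr.Button("Generate")
        result = gr.Image(label="Result")      # accepts the returned PIL.Image as-is
        seed_out = gr.Number(label="Seed")
        info = gr.Textbox(label="Prompts used")
        # The (image, seed, text) return tuple maps onto these three outputs.
        run.click(fake_infer, inputs=prompt, outputs=[result, seed_out, info])

    if __name__ == "__main__":
        demo.launch()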