Commit c7877ea (verified) by yasserrmd · 1 parent: b8330cb

Update app.py

Files changed (1):
  1. app.py  +35 -90
app.py CHANGED
@@ -22,8 +22,7 @@ model_path = "Collov-Labs/Monetico"
 
 model = Transformer2DModel.from_pretrained(model_path, subfolder="transformer", torch_dtype=dtype)
 vq_model = VQModel.from_pretrained(model_path, subfolder="vqvae", torch_dtype=dtype)
-# text_encoder = CLIPTextModelWithProjection.from_pretrained(model_path, subfolder="text_encoder")
-text_encoder = CLIPTextModelWithProjection.from_pretrained( #more stable sampling for some cases
+text_encoder = CLIPTextModelWithProjection.from_pretrained(
     "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", torch_dtype=dtype
 )
 tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer", torch_dtype=dtype)
@@ -32,27 +31,24 @@ pipe = Pipeline(vq_model, tokenizer=tokenizer, text_encoder=text_encoder, transf
 pipe.to(device)
 
 MAX_SEED = 2**32 - 1
-MAX_IMAGE_SIZE = 512
 
 @spaces.GPU
-def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
+def generate_image(occasion, theme, colors, randomize_seed=True, seed=0):
+    prompt = f"{occasion} theme: {theme}, colors: {colors} design inspiration"
     if randomize_seed or seed == 0:
         seed = torch.randint(0, MAX_SEED, (1,)).item()
     torch.manual_seed(seed)
 
     image = pipe(
         prompt=prompt,
-        negative_prompt=negative_prompt,
-        height=height,
-        width=width,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps
+        height=512,
+        width=512,
+        guidance_scale=9.0,
+        num_inference_steps=50
     ).images[0]
 
     return image, seed
 
-# Default negative prompt
-default_negative_prompt = "worst quality, low quality, low res, blurry, distortion, watermark, logo, signature, text, jpeg artifacts, signature, sketch, duplicate, ugly, identifying mark"
 css = """
 #col-container {
     margin: 0 auto;
@@ -61,91 +57,40 @@ css = """
 """
 
 examples = [
-    "Modern Architecture render with pleasing aesthetics.",
-    "An image of a Pikachu wearing a birthday hat and playing guitar.",
-    "A statue of a lion stands in front of a building.",
-    "A white and blue coffee mug with a picture of a man on it.",
-    "A metal sculpture of a deer with antlers.",
-    "A bronze statue of an owl with its wings spread.",
-    "A white table with a vase of flowers and a cup of coffee on top of it.",
-    "A woman stands on a dock in the fog.",
-    "A lion's head is shown in a grayscale image.",
-    "A sculpture of a Greek woman head with a headband and a head of hair."
+    {"occasion": "Wedding", "theme": "Vintage Elegance", "colors": "white and gold"},
+    {"occasion": "Corporate Anniversary", "theme": "Legacy & Growth", "colors": "navy and silver"},
+    {"occasion": "Product Launch", "theme": "Innovation Spark", "colors": "blue and white"},
+    {"occasion": "Team Appreciation", "theme": "Together We Thrive", "colors": "green and gold"},
+    {"occasion": "Award Ceremony", "theme": "Excellence Awards", "colors": "black and gold"},
+    {"occasion": "Milestone Celebration", "theme": "10 Years Strong", "colors": "emerald green and silver"},
+    {"occasion": "Holiday Party", "theme": "Winter Festivity", "colors": "silver and blue"},
+    {"occasion": "Sales Achievement", "theme": "Peak Performers", "colors": "crimson and gray"},
+    {"occasion": "Client Appreciation", "theme": "Thank You Event", "colors": "ivory and gold"},
+    {"occasion": "Office Opening", "theme": "New Beginnings", "colors": "teal and white"},
+    {"occasion": "Retirement Celebration", "theme": "Years of Dedication", "colors": "bronze and navy"},
+    {"occasion": "Quarterly Town Hall", "theme": "United Vision", "colors": "purple and silver"},
+    {"occasion": "Annual Conference", "theme": "Forward Together", "colors": "black and royal blue"},
+    {"occasion": "Workshop Event", "theme": "Skill Building", "colors": "orange and gray"},
+    {"occasion": "Networking Gala", "theme": "Professional Connections", "colors": "champagne and gold"},
+    {"occasion": "Leadership Retreat", "theme": "Inspire & Lead", "colors": "forest green and white"},
 ]
 
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# Monetico Text-to-Image Generator")
+        gr.Markdown("# Cake & Gift Design Inspiration")
         with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-            run_button = gr.Button("Run", scale=0, variant="primary")
-        result = gr.Image(label="Result", show_label=False)
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                value=default_negative_prompt,
-            )
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=20.0,
-                    step=0.1,
-                    value=9.0,
-                )
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=100,
-                    step=1,
-                    value=48,
-                )
-        gr.Examples(examples=examples, inputs=[prompt])
+            occasion = gr.Text(label="Occasion", placeholder="Enter occasion, e.g., Wedding, Birthday")
+            theme = gr.Text(label="Theme", placeholder="Enter theme, e.g., Vintage, Space Adventure")
+            colors = gr.Text(label="Colors", placeholder="Enter colors, e.g., white and gold")
+            run_button = gr.Button("Generate Design", variant="primary")
+        result = gr.Image(label="Generated Design", show_label=False)
+        gr.Examples(examples=examples, inputs=[occasion, theme, colors])
+
     gr.on(
-        triggers=[run_button.click, prompt.submit],
+        triggers=[run_button.click],
         fn=generate_image,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
+        inputs=[occasion, theme, colors],
+        outputs=[result],
    )
 
-demo.launch()
+demo.launch()
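
As a quick sanity check of the new entry point, here is a minimal sketch of calling the revised generate_image directly with one of the example rows above. The values are illustrative only; it assumes the objects defined in app.py (pipe, MAX_SEED, generate_image) are already in scope and that a GPU/ZeroGPU context is available:

    # Minimal sketch: exercise the new (occasion, theme, colors) signature outside the Gradio UI.
    # Assumes app.py above has been executed so that pipe and generate_image exist.
    image, used_seed = generate_image(
        occasion="Wedding",
        theme="Vintage Elegance",
        colors="white and gold",
        randomize_seed=True,  # a fresh seed is drawn via torch.randint inside the function
    )
    image.save("wedding_vintage_elegance.png")  # the pipeline returns a PIL image
    print("seed used:", used_seed)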