marahmerah committed (verified)
Commit 1af6ba9 · Parent: 80ae4a1

Update app.py

Files changed (1):
  1. app.py +43 -75
app.py CHANGED
@@ -1,13 +1,14 @@
+#Save ZeroGPU limited resources, switch to InferenceAPI
 import os
 import gradio as gr
 import numpy as np
 import random
 from huggingface_hub import AsyncInferenceClient
 from translatepy import Translator
+import requests
+import re
 import asyncio
 from PIL import Image
-import io
-import base64
 
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
@@ -35,26 +36,25 @@ def enable_lora(lora_add):
     return lora_add
 
 async def generate_image(
-    prompt: str,
-    model: str,
-    lora_word: str,
-    width: int = 768,
-    height: int = 1024,
-    scales: float = 3.5,
-    steps: int = 24,
-    seed: int = -1,
-):
+    prompt:str,
+    model:str,
+    lora_word:str,
+    width:int=768,
+    height:int=1024,
+    scales:float=3.5,
+    steps:int=24,
+    seed:int=-1):
+
     if seed == -1:
         seed = random.randint(0, MAX_SEED)
     seed = int(seed)
-    print(f'prompt: {prompt}')
-
+    print(f'prompt:{prompt}')
+
     text = str(translator.translate(prompt, 'English')) + "," + lora_word
 
     client = AsyncInferenceClient()
     try:
-        # Generate image using the Inference API
-        image_bytes = await client.text_to_image(
+        image = await client.text_to_image(
             prompt=text,
             height=height,
             width=width,
@@ -62,58 +62,44 @@ async def generate_image(
             num_inference_steps=steps,
             model=model,
         )
-        return image_bytes, seed
     except Exception as e:
         raise gr.Error(f"Error in {e}")
+
+    return image, seed
 
 async def gen(
-    prompt: str,
-    lora_add: str = "",
-    lora_word: str = "",
-    width: int = 768,
-    height: int = 1024,
-    scales: float = 3.5,
-    steps: int = 24,
-    seed: int = -1,
-    progress=gr.Progress(track_tqdm=True),
+    prompt:str,
+    lora_add:str="",
+    lora_word:str="",
+    width:int=768,
+    height:int=1024,
+    scales:float=3.5,
+    steps:int=24,
+    seed:int=-1,
+    progress=gr.Progress(track_tqdm=True)
 ):
     model = enable_lora(lora_add)
     print(model)
-    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
+    image, seed = await generate_image(prompt,model,lora_word,width,height,scales,steps,seed)
     return image, seed
-
-def export_image(image_bytes, format: str = "png"):
-    """Convert image to PNG or Base64."""
-    img = Image.open(io.BytesIO(image_bytes))
-    if format == "png":
-        png_buffer = io.BytesIO()
-        img.save(png_buffer, format="PNG")
-        png_buffer.seek(0)
-        return png_buffer
-    elif format == "base64":
-        buffered = io.BytesIO()
-        img.save(buffered, format="PNG")
-        img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        return f"data:image/png;base64,{img_base64}"
-    else:
-        raise ValueError("Unsupported format. Use 'png' or 'base64'.")
-
+
 examples = [
-    ["wanita cantik sedang duduk di pinggir kolam renang memakai bikini warna pink", "burhansyam/davina", "wanita"],
-    ["a seal holding a beach ball in a pool", "bingbangboom/flux_dreamscape", "in the style of BSstyle004"],
-    ["a tourist in London, illustration in the style of VCTRNDRWNG, Victorian-era drawing", "dvyio/flux-lora-victorian-drawing", "illustration in the style of VCTRNDRWNG"],
-    ["an African American and a Caucasian man petting a cat at a busy electronic store. Flickr photo from 2012. Three people working in the background", "kudzueye/boreal-flux-dev-v2", "photo"],
-    ["mgwr/cine, woman silhouette, morning light, sun rays, indoor scene, soft focus, golden hour, stretching pose, peaceful mood, cozy atmosphere, window light, shadows and highlights, backlit figure, minimalistic interior, warm tones, contemplative moment, calm energy, serene environment, yoga-inspired, elegant posture, natural light beams, artistic composition", "mgwr/Cine-Aesthetic", "atmospheric lighting and a dreamy, surreal vibe"]
+    ["wanita cantik sepertinya seorang barista, sedang membuat kopi hanya memakai celemek, tanpa baju dan bra. Tatapannya menggoda dan menggairahkan", "burhansyam/davina", "wanita"], ["photograph, background of Earth from space, red car on the Moon watching Earth","martintomov/retrofuturism-flux","retrofuturism"],
+    ["a living room interior","fofr/flux-80s-cyberpunk","80s cyberpunk"],
+    ["a tourist in London, illustration in the style of VCTRNDRWNG, Victorian-era drawing","dvyio/flux-lora-victorian-drawing","illustration in the style of VCTRNDRWNG"],
+    ["an African American and a caucasian man petting a cat at a busy electronic store. flikr photo from 2012. three people working in the background","kudzueye/boreal-flux-dev-v2","photo"],
+    ["mgwr/cine, woman silhouette, morning light, sun rays, indoor scene, soft focus, golden hour, stretching pose, peaceful mood, cozy atmosphere, window light, shadows and highlights, backlit figure, minimalistic interior, warm tones, contemplative moment, calm energy, serene environment, yoga-inspired, elegant posture, natural light beams, artistic composition","mgwr/Cine-Aesthetic","atmospheric lighting and a dreamy, surreal vibe"]
 ]
 
 # Gradio Interface
+
 with gr.Blocks(css=CSS, js=JS, theme="ocean") as demo:
-    gr.HTML("<h1><center>Flux Lab Light</center></h1>")
+    gr.HTML("<h1><center>Flux Mantab!</center></h1>")
     gr.HTML("<p><center>Powered By HF Inference API</center></p>")
     with gr.Row():
         with gr.Column(scale=4):
             with gr.Row():
-                img = gr.Image(type="filepath", label='Flux Generated Image', height=600)
+                img = gr.Image(type="filepath", label='flux Generated Image', height=600)
             with gr.Row():
                 prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
                 sendBtn = gr.Button(scale=1, variant='primary')
@@ -167,15 +153,9 @@ with gr.Blocks(css=CSS, js=JS, theme="ocean") as demo:
                 value="",
             )
 
-            with gr.Row():
-                export_png_btn = gr.Button("Export as PNG")
-                export_base64_btn = gr.Button("Export as Base64")
-
-            export_output = gr.Textbox(label="Exported Image", visible=False)
-
         gr.Examples(
             examples=examples,
-            inputs=[prompt, lora_add, lora_word],
+            inputs=[prompt,lora_add,lora_word],
             outputs=[img, seed],
             fn=gen,
             cache_examples="lazy",
@@ -192,26 +172,14 @@ with gr.Blocks(css=CSS, js=JS, theme="ocean") as demo:
             prompt,
             lora_add,
             lora_word,
-            width,
-            height,
-            scales,
-            steps,
-            seed,
+            width,
+            height,
+            scales,
+            steps,
+            seed
         ],
         outputs=[img, seed]
     )
-
-    export_png_btn.click(
-        export_image,
-        inputs=[img, gr.State("png")],
-        outputs=export_output,
-    )
-
-    export_base64_btn.click(
-        export_image,
-        inputs=[img, gr.State("base64")],
-        outputs=export_output,
-    )
-
+
 if __name__ == "__main__":
     demo.queue(api_open=False).launch(show_api=False, share=False)
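
For reference, the core of the new code path is a single Inference API call. Below is a minimal, standalone sketch of that pattern; the prompt and the model id are illustrative only (app.py derives the model from enable_lora() and builds the prompt from the UI fields), so treat it as an assumption-laden example rather than the app itself.

import asyncio
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator

async def demo_call():
    # Translate the prompt to English first, as app.py does before calling the API.
    text = str(Translator().translate("seekor kucing oranye tidur di sofa", "English"))
    # With no arguments, the client falls back to any locally configured HF token.
    client = AsyncInferenceClient()
    # text_to_image returns a PIL.Image.Image on success.
    image = await client.text_to_image(
        prompt=text,
        model="black-forest-labs/FLUX.1-dev",  # assumed model id, for illustration only
        width=768,
        height=1024,
        guidance_scale=3.5,
        num_inference_steps=24,
    )
    image.save("out.png")

asyncio.run(demo_call())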
 
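The event wiring kept by this diff relies on Gradio Blocks accepting async functions as handlers. The toy app below is a self-contained sketch of that wiring under that assumption; the echo handler stands in for gen(), and only the component names follow app.py.

import asyncio
import gradio as gr

async def gen(prompt: str, seed: float = -1):
    await asyncio.sleep(0)              # stand-in for the awaited Inference API call
    return f"echo: {prompt}", seed      # app.py returns (image, seed) instead

with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...")
    out = gr.Textbox(label='Result')
    seed = gr.Number(value=-1, label='Seed')
    sendBtn = gr.Button("Send", variant='primary')
    # inputs map positionally onto gen's parameters; outputs receive the returned tuple
    sendBtn.click(fn=gen, inputs=[prompt, seed], outputs=[out, seed])

if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)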