Akbartus committed
Commit 34341e4 · verified · 1 Parent(s): 96b8d46

Update app.py

Files changed (1)
  1. app.py +60 -9
app.py CHANGED
@@ -14,7 +14,6 @@ from gradio_imageslider import ImageSlider
 
 MAX_SEED = np.iinfo(np.int32).max
 
-
 def enable_lora(lora_add, basemodel):
     return basemodel if not lora_add else lora_add
 
@@ -25,19 +24,41 @@ async def generate_image(prompt, model, lora_word, width, height, scales, steps,
         seed = int(seed)
         text = str(Translator().translate(prompt, 'English')) + "," + lora_word
         client = AsyncInferenceClient()
-        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
+        image = await client.text_to_image(
+            prompt=text,
+            height=height,
+            width=width,
+            guidance_scale=scales,
+            num_inference_steps=steps,
+            model=model
+        )
         return image, seed
     except Exception as e:
-        print(f"Error generando imagen: {e}")
+        print(f"Error generating image: {e}")
         return None, None
 
 def get_upscale_finegrain(prompt, img_path, upscale_factor):
     try:
         client = Client("finegrain/finegrain-image-enhancer")
-        result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
+        result = client.predict(
+            input_image=handle_file(img_path),
+            prompt=prompt,
+            negative_prompt="",
+            seed=42,
+            upscale_factor=upscale_factor,
+            controlnet_scale=0.6,
+            controlnet_decay=1,
+            condition_scale=6,
+            tile_width=112,
+            tile_height=144,
+            denoise_strength=0.35,
+            num_inference_steps=18,
+            solver="DDIM",
+            api_name="/process"
+        )
         return result[1]
     except Exception as e:
-        print(f"Error escalando imagen: {e}")
+        print(f"Error scaling image: {e}")
         return None
 
 async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
@@ -61,6 +82,10 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
     else:
         return [image_path, image_path]
 
+# Helper to run async functions synchronously
+def run_async(fn, *args, **kwargs):
+    return asyncio.run(fn(*args, **kwargs))
+
 css = """
 #col-container{ margin: 0 auto; max-width: 1024px;}
 """
@@ -72,8 +97,27 @@ with gr.Blocks(css=css) as demo:
             output_res = ImageSlider(label="Flux / Upscaled")
         with gr.Column(scale=2):
             prompt = gr.Textbox(label="Image Description")
-            basemodel_choice = gr.Dropdown(label="Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV", "enhanceaiteam/Flux-uncensored", "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro", "Shakker-Labs/FLUX.1-dev-LoRA-add-details", "city96/FLUX.1-dev-gguf"], value="black-forest-labs/FLUX.1-schnell")
-            lora_model_choice = gr.Dropdown(label="LoRA", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora", "enhanceaiteam/Flux-uncensored"], value="XLabs-AI/flux-RealismLora")
+            basemodel_choice = gr.Dropdown(
+                label="Model",
+                choices=[
+                    "black-forest-labs/FLUX.1-schnell",
+                    "black-forest-labs/FLUX.1-DEV",
+                    "enhanceaiteam/Flux-uncensored",
+                    "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
+                    "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
+                    "city96/FLUX.1-dev-gguf"
+                ],
+                value="black-forest-labs/FLUX.1-schnell"
+            )
+            lora_model_choice = gr.Dropdown(
+                label="LoRA",
+                choices=[
+                    "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
+                    "XLabs-AI/flux-RealismLora",
+                    "enhanceaiteam/Flux-uncensored"
+                ],
+                value="XLabs-AI/flux-RealismLora"
+            )
             process_lora = gr.Checkbox(label="LoRA Process")
             process_upscale = gr.Checkbox(label="Scale Process")
             upscale_factor = gr.Radio(label="Scaling Factor", choices=[2, 4, 8], value=2)
@@ -86,5 +130,12 @@ with gr.Blocks(css=css) as demo:
             seed = gr.Number(label="Seed", value=-1)
 
     btn = gr.Button("Generate")
-    btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res,)
-    demo.launch()
+    btn.click(
+        fn=lambda *inputs: run_async(gen, *inputs),
+        inputs=[
+            prompt, basemodel_choice, width, height, scales, steps, seed,
+            upscale_factor, process_upscale, lora_model_choice, process_lora
+        ],
+        outputs=output_res
+    )
+demo.launch()
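For reference, here is a minimal standalone sketch of the async-to-sync bridge this commit introduces: wrapping a coroutine with asyncio.run so it can be called from a synchronous Gradio click handler. The fake_gen coroutine and the single-textbox layout are hypothetical stand-ins for app.py's gen() and UI, not code from this repository.

import asyncio
import gradio as gr

# Hypothetical stand-in for app.py's async gen(); simulates an async inference call.
async def fake_gen(prompt: str) -> str:
    await asyncio.sleep(0.1)
    return f"generated: {prompt}"

# Same bridge as the commit's run_async helper: drive a coroutine to completion
# from synchronous code.
def run_async(fn, *args, **kwargs):
    return asyncio.run(fn(*args, **kwargs))

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    btn = gr.Button("Generate")
    # The click handler stays synchronous; the lambda delegates to the coroutine.
    btn.click(fn=lambda p: run_async(fake_gen, p), inputs=prompt, outputs=result)

demo.launch()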