salomonsky committed · verified
Commit 58b06a7 · Parent(s): e5d91cc

Update app.py

Files changed (1):
  1. app.py +8 -38
app.py CHANGED
@@ -14,7 +14,6 @@ from gradio_imageslider import ImageSlider
 
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN")
-HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
 MAX_SEED = np.iinfo(np.int32).max
 CSS = "footer { visibility: hidden; }"
 JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"
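With HF_TOKEN_UPSCALER removed, a single Space secret now authenticates both the text-to-image call and the upscaler call. A minimal sketch of that setup, assuming the text-to-image client is huggingface_hub's AsyncInferenceClient (its construction is not shown in this diff):

```python
import os

from gradio_client import Client
from huggingface_hub import AsyncInferenceClient

HF_TOKEN = os.environ.get("HF_TOKEN")  # one secret shared by both clients

# Assumed text-to-image client; the diff only shows client.text_to_image(...).
client = AsyncInferenceClient(token=HF_TOKEN)

# Upscaler client, as used by get_upscale_finegrain() after this commit.
upscaler = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN)
```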
@@ -31,49 +30,21 @@ async def generate_image(prompt, model, lora_word, width, height, scales, steps,
     image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
     return image, seed
 
-def get_clarity_upscale(prompt, img_path, upscale_factor):
-    client = Client("jbilcke-hf/clarity-upscaler")
-    result = client.predict(
-        img_path,
-        prompt,
-        "",
-        upscale_factor,
-        1,
-        3,
-        3,
-        "16",
-        "16",
-        "epicrealism_naturalSinRC1VAE.safetensors [84d76a0328]",
-        "DPM++ 2M Karras",
-        1,
-        3,
-        True,
-        3,
-        "Hello!!",
-        "Hello!!",
-        api_name="/predict"
-    )
-    print(result)
-    return result
-
-async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscaler_choice):
-    model = lora_model
+async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
+    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
     image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
     image_path = "temp_image.png"
     image.save(image_path)
 
     if process_upscale:
-        if upscaler_choice == "FineGrain":
-            upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
-        elif upscaler_choice == "Upscaler Clarity":
-            upscale_image = get_clarity_upscale(prompt, image_path, upscale_factor)
+        upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
     else:
         upscale_image = image_path
 
     return [image_path, upscale_image]
 
 def get_upscale_finegrain(prompt, img_path, upscale_factor):
-    client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
+    client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN)
     result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
     return result[1]
 
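The rewritten gen() calls enable_lora(), which is not part of this diff. A minimal hypothetical sketch of what it could look like, based on the behaviour it replaces (the old code used the LoRA repo id directly as the model):

```python
def enable_lora(lora_model: str, basemodel: str) -> str:
    """Hypothetical helper (not shown in this commit): pick the repo id passed
    to generate_image(). Falls back to the base model when no LoRA is selected."""
    return lora_model if lora_model else basemodel
```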
 
@@ -94,10 +65,9 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
         prompt = gr.Textbox(label="Prompt")
         basemodel_choice = gr.Dropdown(label="Base Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
         lora_model_choice = gr.Dropdown(label="LORA Model", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
-        process_lora = gr.Checkbox(label="Process LORA", value=False)
-        process_upscale = gr.Checkbox(label="Process Upscale", value=False)
-        upscale_factor = gr.Radio(label="UpScale Factor", choices=[2, 4, 8], value=2, scale=2)
-        upscaler_choice = gr.Radio(label="Upscaler", choices=["FineGrain", "Upscaler Clarity"], value="FineGrain")
+        process_lora = gr.Checkbox(label="Process LORA")
+        process_upscale = gr.Checkbox(label="Process Upscale")
+        upscale_factor = gr.Radio(label="UpScale Factor", choices=[2, 4, 8], value=2)
 
         with gr.Accordion(label="Advanced Options", open=False):
             width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=512)
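Dropping value=False and scale=2 should be behaviour-preserving: gr.Checkbox is unchecked by default, and scale only affects layout sizing. A quick standalone illustration (outside the app):

```python
import gradio as gr

# gr.Checkbox defaults to value=False, so dropping the argument changes nothing.
explicit = gr.Checkbox(label="Process LORA", value=False)
implicit = gr.Checkbox(label="Process LORA")
assert explicit.value is False and implicit.value is False
```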
@@ -114,7 +84,7 @@ with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
         queue=False
     ).then(
         fn=gen,
-        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora, upscaler_choice],
+        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
         outputs=[output_res]
     )
 demo.launch()
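For reference, the upscaler that remains after this commit can also be exercised on its own with gradio_client, mirroring the parameters used in get_upscale_finegrain(); the prompt and image path below are placeholders:

```python
import os

from gradio_client import Client, handle_file

client = Client("finegrain/finegrain-image-enhancer", hf_token=os.environ.get("HF_TOKEN"))
result = client.predict(
    input_image=handle_file("temp_image.png"),      # file written by gen()
    prompt="a detailed photo of a mountain lake",   # placeholder prompt
    negative_prompt="",
    seed=42,
    upscale_factor=2,
    controlnet_scale=0.6,
    controlnet_decay=1,
    condition_scale=6,
    tile_width=112,
    tile_height=144,
    denoise_strength=0.35,
    num_inference_steps=18,
    solver="DDIM",
    api_name="/process",
)
print(result[1])  # path to the upscaled image, the element gen() keeps
```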
 