openfree committed on
Commit
ead9c45
·
verified ·
1 Parent(s): 28d91b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -6
app.py CHANGED
@@ -722,6 +722,34 @@ def save_generated_image(image, prompt):
722
  image.save(filepath)
723
  return filepath
724
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
725
  @spaces.GPU(duration=60)
726
  def generate_image(
727
  prompt: str,
@@ -740,7 +768,8 @@ def generate_image(
740
  generator = torch.Generator(device=device).manual_seed(seed)
741
 
742
  with torch.inference_mode():
743
- image = pipe(
 
744
  prompt=prompt,
745
  width=width,
746
  height=height,
@@ -757,16 +786,13 @@ def generate_image(
757
  finally:
758
  clear_memory()
759
 
760
-
761
-
762
-
763
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
764
  position = gr.State(value="bottom-center")
765
 
766
  gr.HTML("""
767
  <div class="main-title">
768
- <h1>🎨 GiniGen Canvas-o3</h1>
769
- <p>Remove background of specified objects, generate new backgrounds, and insert text over or behind images with prompts.</p>
770
  </div>
771
  """)
772
 
 
722
  image.save(filepath)
723
  return filepath
724
 
725
+ gen_pipe = FluxPipeline.from_pretrained(
726
+ "black-forest-labs/FLUX.1-dev",
727
+ torch_dtype=torch.float16,
728
+ use_auth_token=HF_TOKEN
729
+ )
730
+ gen_pipe.enable_attention_slicing(slice_size="auto")
731
+
732
+ # Load LoRA weights for image generation
733
+ try:
734
+ lora_path = hf_hub_download(
735
+ "ginipick/flux-lora-eric-cat",
736
+ "flux-lora-eric-cat.safetensors", # TODO: verify the actual filename
737
+ use_auth_token=HF_TOKEN
738
+ )
739
+ gen_pipe.load_lora_weights(lora_path)
740
+ gen_pipe.fuse_lora(lora_scale=0.125)
741
+ except Exception as e:
742
+ print(f"Error loading generation LoRA weights: {str(e)}")
743
+ raise ValueError("Failed to load generation LoRA weights. Please check your HF_TOKEN and model access.")
744
+
745
+ # Move the pipeline to GPU
746
+ if torch.cuda.is_available():
747
+ try:
748
+ gen_pipe = gen_pipe.to("cuda:0")
749
+ except Exception as e:
750
+ print(f"Warning: Could not move generation pipeline to CUDA: {str(e)}")
751
+
752
+ # Updated generate_image function
753
  @spaces.GPU(duration=60)
754
  def generate_image(
755
  prompt: str,
 
768
  generator = torch.Generator(device=device).manual_seed(seed)
769
 
770
  with torch.inference_mode():
771
+ # Use gen_pipe
772
+ image = gen_pipe(
773
  prompt=prompt,
774
  width=width,
775
  height=height,
 
786
  finally:
787
  clear_memory()
788
 
 
 
 
789
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
790
  position = gr.State(value="bottom-center")
791
 
792
  gr.HTML("""
793
  <div class="main-title">
794
+ <h1>🎨 Webtoon Canvas</h1>
795
+ <p>Webtoon generated, Remove background of specified objects, generate new backgrounds, and insert text over or behind images with prompts.</p>
796
  </div>
797
  """)
798