import os import time from os import path import tempfile import uuid import base64 import mimetypes import json import io import random import string import torch from PIL import Image from safetensors.torch import load_file from huggingface_hub import hf_hub_download # Diffusers 관련 라이브러리 import gradio as gr from diffusers import FluxPipeline # Google GenAI 라이브러리 from google import genai from google.genai import types ####################################### # 0. 환경설정 ####################################### BASE_DIR = path.dirname(path.abspath(__file__)) if "__file__" in globals() else os.getcwd() CACHE_PATH = path.join(BASE_DIR, "models") os.environ["TRANSFORMERS_CACHE"] = CACHE_PATH os.environ["HF_HUB_CACHE"] = CACHE_PATH os.environ["HF_HOME"] = CACHE_PATH class timer: def __init__(self, method_name="timed process"): self.method = method_name def __enter__(self): self.start = time.time() print(f"[TIMER] {self.method} starts") def __exit__(self, exc_type, exc_val, exc_tb): end = time.time() print(f"[TIMER] {self.method} took {round(end - self.start, 2)}s") ####################################### # 1. FLUX 파이프라인 로드 ####################################### if not path.exists(CACHE_PATH): os.makedirs(CACHE_PATH, exist_ok=True) pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 ) lora_path = hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors") pipe.load_lora_weights(lora_path) pipe.fuse_lora(lora_scale=0.125) pipe.to(device="cuda", dtype=torch.bfloat16) ####################################### # 2. Google GenAI (Gemini) - 이미지 변환 함수 ####################################### def save_binary_file(file_name, data): with open(file_name, "wb") as f: f.write(data) def generate_by_google_genai(text, file_name, model="gemini-2.0-flash-exp"): """Gemini 모델을 통해 이미지 내부 텍스트를 변경.""" api_key = os.getenv("GAPI_TOKEN", None) if not api_key: raise ValueError( "GAPI_TOKEN 환경 변수가 설정되지 않았습니다. " "Google GenAI API를 사용하기 위해서는 GAPI_TOKEN이 필요합니다." ) client = genai.Client(api_key=api_key) files = [client.files.upload(file=file_name)] contents = [ types.Content( role="user", parts=[ types.Part.from_uri( file_uri=files[0].uri, mime_type=files[0].mime_type, ), types.Part.from_text(text=text), ], ), ] generate_content_config = types.GenerateContentConfig( temperature=1, top_p=0.95, top_k=40, max_output_tokens=8192, response_modalities=["image", "text"], response_mime_type="text/plain", ) text_response = "" image_path = None with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: temp_path = tmp.name for chunk in client.models.generate_content_stream( model=model, contents=contents, config=generate_content_config, ): if not chunk.candidates or not chunk.candidates[0].content: continue candidate = chunk.candidates[0].content.parts[0] if candidate.inline_data: save_binary_file(temp_path, candidate.inline_data.data) print(f"[DEBUG] Gemini returned image -> {temp_path}") image_path = temp_path break else: text_response += chunk.text + "\n" del files return image_path, text_response ####################################### # 3. Diffusion (Flux)용 함수 ####################################### def generate_random_letters(length: int) -> str: """length 길이만큼 대소문자 알파벳을 무작위로 생성.""" letters = string.ascii_lowercase + string.ascii_uppercase return "".join(random.choice(letters) for _ in range(length)) def fill_prompt_with_random_texts(prompt: str, r1: str, r2: str, r3: str) -> str: """ 프롬프트 내 , , 를 각각 r1, r2, r3로 치환. - 은 필수 (없으면 자동으로 뒤에 붙임). - , 는 있으면 치환, 없으면 무시. """ # 1) 은 필수 if "" in prompt: prompt = prompt.replace("", r1) else: # 자동 덧붙임 prompt = f"{prompt} with clear readable text that says '{r1}'" # 2) , 는 선택 if "" in prompt: prompt = prompt.replace("", r2) if "" in prompt: prompt = prompt.replace("", r3) return prompt def generate_initial_image(prompt, random1, random2, random3, height, width, steps, scale, seed): """ Flux 파이프라인을 이용해 (r1, r2, r3)가 들어간 이미지를 생성. """ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("Flux Generation"): result = pipe( prompt=[prompt], generator=torch.Generator().manual_seed(int(seed)), num_inference_steps=int(steps), guidance_scale=float(scale), height=int(height), width=int(width), max_sequence_length=256 ).images[0] return result def change_multi_text_in_image(original_image, random1, final1, random2, final2, random3, final3): """ Gemini를 통해, 이미지 안의 r1->final1, r2->final2, r3->final3 식으로 텍스트 교체. - r2, final2 (또는 r3, final3)가 빈 문자열이면 해당 교체는 건너뜀. """ # 교체 지시문 만들기 instructions = [] if random1 and final1: instructions.append(f"Change any text reading '{random1}' in this image to '{final1}'.") if random2 and final2: instructions.append(f"Change any text reading '{random2}' in this image to '{final2}'.") if random3 and final3: instructions.append(f"Change any text reading '{random3}' in this image to '{final3}'.") # 만약 교체 지시문이 없다면 그냥 return original_image if not instructions: print("[WARN] No text changes requested!") return original_image full_instruction = " ".join(instructions) try: # 임시 파일에 original_image 저장 with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: original_path = tmp.name original_image.save(original_path) image_path, text_response = generate_by_google_genai( text=full_instruction, file_name=original_path ) if image_path: with open(image_path, "rb") as f: image_data = f.read() new_img = Image.open(io.BytesIO(image_data)) return new_img else: # 이미지 없이 텍스트만 온 경우 print("[WARN] Gemini returned only text:", text_response) return original_image except Exception as e: raise gr.Error(f"Error: {e}") ####################################### # 4. 메인 프로세스 함수 ####################################### def run_process( prompt, final_text1, final_text2, final_text3, height, width, steps, scale, seed ): """ 1) final_text1(필수), final_text2, final_text3(옵션) 각각 길이에 맞춰 무작위 알파벳 만들기. 2) prompt 내 , , 치환 -> Flux로 1차(랜덤) 이미지. 3) Gemini 호출 -> r1->final_text1, r2->final_text2, r3->final_text3 교체 -> 최종 이미지. """ # (A) 무작위 알파벳 r1 = generate_random_letters(len(final_text1)) if final_text1 else "" r2 = generate_random_letters(len(final_text2)) if final_text2 else "" r3 = generate_random_letters(len(final_text3)) if final_text3 else "" # (B) 프롬프트 치환 final_prompt = fill_prompt_with_random_texts(prompt, r1, r2, r3) print(f"[DEBUG] final_prompt = {final_prompt}") # (C) 1차 이미지 (랜덤 텍스트) random_image = generate_initial_image(final_prompt, r1, r2, r3, height, width, steps, scale, seed) # (D) 2차 이미지 (실제 텍스트) final_image = change_multi_text_in_image( random_image, r1, final_text1, r2, final_text2, r3, final_text3 ) return [random_image, final_image] ####################################### # 5. Gradio UI ####################################### with gr.Blocks(title="Flux + Google GenAI (Up to 3 Text placeholders)") as demo: gr.Markdown( """ # Flux + Google GenAI: 최대 3개의 `` 교체 ## 사용 방법 1. 아래 Prompt에 ``, ``, ``를 최대 3개까지 배치 가능. - 예) "A poster with in large letters, also in the corner" - **은 필수**(없으면 자동으로 문구가 뒤에 붙음) - , 는 넣어도 되고, 안 넣어도 됨. 2. "New Text #1" (필수), "New Text #2", "New Text #3"를 입력. - #2, #3는 비워 두면 해당 자리 교체 없음. 3. "Generate Images" 버튼 → (1) ``, ``, `` 자리에 (또는 자동으로) **무작위 알파벳** 넣은 1차 이미지 생성 (2) 이어 Gemini 모델을 통해 무작위 알파벳 → 실제 "New Text #1/2/3" 변경한 2차 이미지 - **두 이미지**(랜덤 텍스트 → 최종 텍스트)가 순서대로 출력됩니다. --- """ ) # 예시 5개 examples = [ [ "A futuristic billboard shows and a small sign on the left side. is a hidden watermark.", "HELLO", "WELCOME", "2025" ], [ "A fantasy poster with and in stylized letters, plus a tiny note at the bottom.", "Dragons", "MagicRealm", "Beware!" ], [ "A neon sign reading , with a secondary text below. might appear in the corner.", "OPEN", "24HOUR", "NoSmoking" ], [ "A big invitation card with main text , subtitle , signature in cursive.", "Birthday Party", "Today Only", "From Your Friend" ], [ "A large graffiti wall with in bold letters, plus and near the edges.", "FREEDOM", "HOPE", "LOVE" ] ] with gr.Row(): with gr.Column(): prompt_input = gr.Textbox( lines=3, label="Prompt (use ``, ``, `` as needed)", placeholder="Ex) A poster with , plus a line , etc." ) final_text1 = gr.Textbox( label="New Text #1 (Required)", placeholder="Ex) HELLO" ) final_text2 = gr.Textbox( label="New Text #2 (Optional)", placeholder="Ex) WELCOME" ) final_text3 = gr.Textbox( label="New Text #3 (Optional)", placeholder="Ex) 2025 or anything" ) with gr.Accordion("Advanced Settings", open=False): height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=512) width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=512) steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8) scale = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=10.0, step=0.5, value=3.5) seed = gr.Number(label="Seed (reproducibility)", value=1234, precision=0) run_btn = gr.Button("Generate Images", variant="primary") gr.Examples( examples=examples, inputs=[prompt_input, final_text1, final_text2, final_text3], label="Click to load example" ) with gr.Column(): random_image_output = gr.Image(label="1) Random Text Image", type="pil") final_image_output = gr.Image(label="2) Final Text Image", type="pil") # 버튼 액션 run_btn.click( fn=run_process, inputs=[ prompt_input, final_text1, final_text2, final_text3, height, width, steps, scale, seed ], outputs=[random_image_output, final_image_output] ) demo.launch(max_threads=20)