import gradio as gr
import spaces
from gradio_litmodel3d import LitModel3D
import os
import torch
import numpy as np
import imageio
import uuid
from easydict import EasyDict as edict
from PIL import Image
from trellis.pipelines import TrellisImageTo3DPipeline
from trellis.representations import Gaussian, MeshExtractResult
from trellis.utils import render_utils, postprocessing_utils
from transformers import pipeline as translation_pipeline
from diffusers import FluxPipeline
from typing import *

MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = "/tmp/Trellis-demo"
os.makedirs(TMP_DIR, exist_ok=True)

# Memory-related environment variables
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
os.environ['TORCH_HOME'] = '/tmp/torch_home'
os.environ['HF_HOME'] = '/tmp/huggingface'
os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
os.environ['SPCONV_ALGO'] = 'native'
os.environ['WARP_USE_CPU'] = '1'


def initialize_models():
    global pipeline, translator, flux_pipe
    try:
        # Create cache directories
        for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home',
                         '/tmp/huggingface', '/tmp/cache']:
            os.makedirs(dir_path, exist_ok=True)

        # Initialize the Trellis pipeline
        pipeline = TrellisImageTo3DPipeline.from_pretrained(
            "JeffreyXiang/TRELLIS-image-large"
        )

        # Initialize the Korean-to-English translator
        translator = translation_pipeline(
            "translation",
            model="Helsinki-NLP/opus-mt-ko-en",
            device="cpu"
        )

        # The Flux pipeline is loaded lazily, only when needed
        flux_pipe = None

        print("Models initialized successfully")
        return True
    except Exception as e:
        print(f"Model initialization error: {str(e)}")
        return False


def load_flux_pipe():
    """Load the Flux pipeline only when it is first needed."""
    global flux_pipe
    if flux_pipe is None:
        flux_pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float32
        )
    return flux_pipe


def free_memory():
    """Utility that frees memory and clears temporary cache files."""
    import gc
    gc.collect()

    # Clean up temporary files
    for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home',
                     '/tmp/huggingface', '/tmp/cache']:
        if os.path.exists(dir_path):
            for file in os.listdir(dir_path):
                file_path = os.path.join(dir_path, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    print(f'Error deleting {file_path}: {e}')


@spaces.GPU
def setup_gpu_model(model):
    """Move a model to the GPU when one is available."""
    if torch.cuda.is_available():
        model = model.to("cuda")
    return model


def translate_if_korean(text):
    if any(ord('가') <= ord(char) <= ord('힣') for char in text):
        translated = translator(text)[0]['translation_text']
        return translated
    return text


@spaces.GPU
def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
    try:
        trial_id = str(uuid.uuid4())

        # Upscale images that are too small
        min_size = 64
        if image.size[0] < min_size or image.size[1] < min_size:
            ratio = min_size / min(image.size)
            new_size = tuple(int(dim * ratio) for dim in image.size)
            image = image.resize(new_size, Image.LANCZOS)

        processed_image = pipeline.preprocess_image(image)
        processed_image.save(f"{TMP_DIR}/{trial_id}.png")
        return trial_id, processed_image
    except Exception as e:
        print(f"Error in preprocess_image: {str(e)}")
        return None, None


def pack_state(gs: Gaussian, mesh: MeshExtractResult, trial_id: str) -> dict:
    return {
        'gaussian': {
            **gs.init_params,
            '_xyz': gs._xyz.cpu().numpy(),
            '_features_dc': gs._features_dc.cpu().numpy(),
            '_scaling': gs._scaling.cpu().numpy(),
            '_rotation': gs._rotation.cpu().numpy(),
            '_opacity': gs._opacity.cpu().numpy(),
        },
        'mesh': {
            'vertices': mesh.vertices.cpu().numpy(),
            'faces': mesh.faces.cpu().numpy(),
        },
        'trial_id': trial_id,
    }


def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
    gs = Gaussian(
        aabb=state['gaussian']['aabb'],
        sh_degree=state['gaussian']['sh_degree'],
        mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
        scaling_bias=state['gaussian']['scaling_bias'],
        opacity_bias=state['gaussian']['opacity_bias'],
        scaling_activation=state['gaussian']['scaling_activation'],
    )
    gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
    gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
    gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
    gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
    gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')

    mesh = edict(
        vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
        faces=torch.tensor(state['mesh']['faces'], device='cuda'),
    )
    return gs, mesh, state['trial_id']


@spaces.GPU
def image_to_3d(trial_id: str, seed: int, randomize_seed: bool,
                ss_guidance_strength: float, ss_sampling_steps: int,
                slat_guidance_strength: float, slat_sampling_steps: int):
    try:
        if randomize_seed:
            seed = np.random.randint(0, MAX_SEED)

        input_image = Image.open(f"{TMP_DIR}/{trial_id}.png")

        # Limit image size
        max_size = 512
        if max(input_image.size) > max_size:
            ratio = max_size / max(input_image.size)
            input_image = input_image.resize(
                (int(input_image.size[0] * ratio), int(input_image.size[1] * ratio)),
                Image.LANCZOS
            )

        if torch.cuda.is_available():
            pipeline.to("cuda")

        with torch.no_grad():
            outputs = pipeline.run(
                input_image,
                seed=seed,
                formats=["gaussian", "mesh"],
                preprocess_image=False,
                sparse_structure_sampler_params={
                    "steps": min(ss_sampling_steps, 15),
                    "cfg_strength": ss_guidance_strength,
                },
                slat_sampler_params={
                    "steps": min(slat_sampling_steps, 15),
                    "cfg_strength": slat_guidance_strength,
                }
            )

        # Reduce the number of video frames
        video = render_utils.render_video(outputs['gaussian'][0], num_frames=30)['color']
        video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=30)['normal']
        video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]

        trial_id = str(uuid.uuid4())
        video_path = f"{TMP_DIR}/{trial_id}.mp4"
        os.makedirs(os.path.dirname(video_path), exist_ok=True)
        imageio.mimsave(video_path, video, fps=15)

        state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], trial_id)

        if torch.cuda.is_available():
            pipeline.to("cpu")

        return state, video_path
    except Exception as e:
        print(f"Error in image_to_3d: {str(e)}")
        if torch.cuda.is_available():
            pipeline.to("cpu")
        raise e


@spaces.GPU
def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
    # Initialize so the exception handler below can safely reference it
    flux_pipe = None
    try:
        # Load the Flux pipeline
        flux_pipe = load_flux_pipe()
        if torch.cuda.is_available():
            flux_pipe.to("cuda")

        # Limit image size
        height = min(height, 512)
        width = min(width, 512)

        base_prompt = "wbgmsst, 3D, white background"
        translated_prompt = translate_if_korean(prompt)
        final_prompt = f"{translated_prompt}, {base_prompt}"

        with torch.inference_mode():
            image = flux_pipe(
                prompt=[final_prompt],
                height=height,
                width=width,
                guidance_scale=min(guidance_scale, 10.0),
                num_inference_steps=min(num_steps, 30)
            ).images[0]

        if torch.cuda.is_available():
            flux_pipe.to("cpu")

        return image
    except Exception as e:
        print(f"Error in generate_image_from_text: {str(e)}")
        if torch.cuda.is_available() and flux_pipe is not None:
            flux_pipe.to("cpu")
        raise e


@spaces.GPU
def extract_glb(state: dict, mesh_simplify: float, texture_size: int) -> Tuple[str, str]:
    gs, mesh, trial_id = unpack_state(state)
    glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify,
                                      texture_size=texture_size, verbose=False)
f"{TMP_DIR}/{trial_id}.glb" glb.export(glb_path) return glb_path, glb_path def activate_button() -> gr.Button: return gr.Button(interactive=True) def deactivate_button() -> gr.Button: return gr.Button(interactive=False) css = """ footer { visibility: hidden; } """ # Gradio 인터페이스 정의 with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo: gr.Markdown(""" # Craft3D : 3D Asset Creation & Text-to-Image Generation """) with gr.Tabs(): with gr.TabItem("Image to 3D"): with gr.Row(): with gr.Column(): image_prompt = gr.Image(label="Image Prompt", image_mode="RGBA", type="pil", height=300) with gr.Accordion(label="Generation Settings", open=False): seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1) randomize_seed = gr.Checkbox(label="Randomize Seed", value=True) gr.Markdown("Stage 1: Sparse Structure Generation") with gr.Row(): ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1) ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1) gr.Markdown("Stage 2: Structured Latent Generation") with gr.Row(): slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1) slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1) generate_btn = gr.Button("Generate") with gr.Accordion(label="GLB Extraction Settings", open=False): mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01) texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512) extract_glb_btn = gr.Button("Extract GLB", interactive=False) with gr.Column(): video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300) model_output = LitModel3D(label="Extracted GLB", exposure=20.0, height=300) download_glb = gr.DownloadButton(label="Download GLB", interactive=False) with gr.TabItem("Text to Image"): with gr.Row(): with gr.Column(): text_prompt = gr.Textbox( label="Text Prompt", placeholder="Enter your image description...", lines=3 ) with gr.Row(): txt2img_height = gr.Slider(256, 1024, value=512, step=64, label="Height") txt2img_width = gr.Slider(256, 1024, value=512, step=64, label="Width") with gr.Row(): guidance_scale = gr.Slider(1.0, 20.0, value=7.5, label="Guidance Scale") num_steps = gr.Slider(1, 50, value=20, label="Number of Steps") generate_txt2img_btn = gr.Button("Generate Image") with gr.Column(): txt2img_output = gr.Image(label="Generated Image") trial_id = gr.Textbox(visible=False) output_buf = gr.State() # Example images with gr.Row(): examples = gr.Examples( examples=[ f'assets/example_image/{image}' for image in os.listdir("assets/example_image") ], inputs=[image_prompt], fn=preprocess_image, outputs=[trial_id, image_prompt], run_on_click=True, examples_per_page=64, ) # Handlers image_prompt.upload( preprocess_image, inputs=[image_prompt], outputs=[trial_id, image_prompt], ) image_prompt.clear( lambda: '', outputs=[trial_id], ) generate_btn.click( image_to_3d, inputs=[trial_id, seed, randomize_seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps], outputs=[output_buf, video_output], concurrency_limit=1 ).then( activate_button, outputs=[extract_glb_btn] ) extract_glb_btn.click( extract_glb, inputs=[output_buf, mesh_simplify, texture_size], outputs=[model_output, download_glb], concurrency_limit=1 ).then( activate_button, outputs=[download_glb] ) generate_txt2img_btn.click( generate_image_from_text, inputs=[text_prompt, txt2img_height, txt2img_width, guidance_scale, num_steps], 
        outputs=[txt2img_output],
        concurrency_limit=1
    )


if __name__ == "__main__":
    # Free memory before loading models
    free_memory()

    # Initialize models
    if not initialize_models():
        print("Failed to initialize models")
        exit(1)

    try:
        # Try to preload rembg (with a very small image)
        test_image = Image.fromarray(np.ones((32, 32, 3), dtype=np.uint8) * 255)
        pipeline.preprocess_image(test_image)
    except Exception as e:
        print(f"Warning: Failed to preload rembg: {str(e)}")

    # Launch the Gradio app
    demo.queue(max_size=3).launch(
        share=True,
        max_threads=1,
        show_error=True
    )