aiqtech committed
Commit 856934e · verified · 1 Parent(s): 61b9b71

Update app.py

Files changed (1)
  1. app.py +37 -32
app.py CHANGED
@@ -36,37 +36,38 @@ g = GlobalVars()
 def initialize_models(device):
     try:
         print("Initializing models...")
-        # 3D generation pipeline
-        g.trellis_pipeline = TrellisImageTo3DPipeline.from_pretrained(
-            "JeffreyXiang/TRELLIS-image-large"
-        )
-
-        # Image generation pipeline
-        print("Loading flux_pipe...")
-        g.flux_pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.bfloat16,
-            device_map="balanced"
-        )
-
-        # Load Hyper-SD LoRA
-        print("Loading LoRA weights...")
-        lora_path = hf_hub_download(
-            "ByteDance/Hyper-SD",
-            "Hyper-FLUX.1-dev-8steps-lora.safetensors",
-            use_auth_token=HF_TOKEN
-        )
-        g.flux_pipe.load_lora_weights(lora_path)
-        g.flux_pipe.fuse_lora(lora_scale=0.125)
-
-        # Initialize translator
-        print("Initializing translator...")
-        g.translator = transformers_pipeline(
-            "translation",
-            model="Helsinki-NLP/opus-mt-ko-en",
-            device=device
-        )
-        print("Model initialization completed successfully")
+        with torch.no_grad():
+            # 3D generation pipeline
+            g.trellis_pipeline = TrellisImageTo3DPipeline.from_pretrained(
+                "JeffreyXiang/TRELLIS-image-large"
+            )
+
+            # Image generation pipeline
+            print("Loading flux_pipe...")
+            g.flux_pipe = FluxPipeline.from_pretrained(
+                "black-forest-labs/FLUX.1-dev",
+                torch_dtype=torch.bfloat16,
+                device_map="balanced"
+            )
+
+            # Load Hyper-SD LoRA
+            print("Loading LoRA weights...")
+            lora_path = hf_hub_download(
+                "ByteDance/Hyper-SD",
+                "Hyper-FLUX.1-dev-8steps-lora.safetensors",
+                use_auth_token=HF_TOKEN
+            )
+            g.flux_pipe.load_lora_weights(lora_path)
+            g.flux_pipe.fuse_lora(lora_scale=0.125)
+
+            # Initialize translator
+            print("Initializing translator...")
+            g.translator = transformers_pipeline(
+                "translation",
+                model="Helsinki-NLP/opus-mt-ko-en",
+                device=device
+            )
+            print("Model initialization completed successfully")
 
     except Exception as e:
         print(f"Error during model initialization: {str(e)}")
@@ -77,13 +78,17 @@ torch.cuda.empty_cache()
 torch.backends.cuda.matmul.allow_tf32 = True
 torch.backends.cudnn.benchmark = True
 
-
 # Environment variable setup
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
 os.environ['SPCONV_ALGO'] = 'native'
 os.environ['SPARSE_BACKEND'] = 'native'
 os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 os.environ['TORCH_USE_CUDA_DSA'] = '1'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+# Prevent premature CUDA initialization
+torch.set_grad_enabled(False)
+
 
 # Hugging Face token setup
 HF_TOKEN = os.getenv("HF_TOKEN")