ginipick committed
Commit 8570e29 · verified · 1 Parent(s): b9d9809

Update app.py

Files changed (1)
  1. app.py +6 -17
app.py CHANGED
@@ -13,13 +13,8 @@ from diffusers.utils import export_to_video
  import random
  from transformers import pipeline
  
- # Get the Hugging Face token
- hf_token = os.getenv("HF_TOKEN")
- if not hf_token:
-     raise ValueError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face token.")
- 
- # Load the translation model (token authentication added)
- translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", use_auth_token=hf_token)
+ # Load the translation model
+ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
  
  # Korean menu name dictionary
  korean_labels = {
@@ -41,21 +36,15 @@ korean_labels = {
      "Seed": "시드"
  }
  
- # load pipelines (token authentication added)
- # base_model = "black-forest-labs/FLUX.1-dev"
- 
+ # load pipelines
  base_model = "black-forest-labs/FLUX.1-schnell"
  
- 
- taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16, use_auth_token=hf_token).to("cuda")
+ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")
  pipe = FluxPipeline.from_pretrained(base_model,
                                      vae=taef1,
-                                     torch_dtype=torch.bfloat16,
-                                     use_auth_token=hf_token)
+                                     torch_dtype=torch.bfloat16)
  
  pipe.transformer.to(memory_format=torch.channels_last)
- #pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
- # pipe.enable_model_cpu_offload()
  clip_slider = CLIPSliderFlux(pipe, device=torch.device("cuda"))
  
  MAX_SEED = 2**32-1
@@ -85,7 +74,7 @@ def convert_to_centered_scale(num):
  
  def translate_if_korean(text):
      if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
-         return translator(text, use_auth_token=hf_token)[0]['translation_text']
+         return translator(text)[0]['translation_text']
      return text
  
  @spaces.GPU(duration=85)
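
As a sanity check on the new code path, here is a minimal, self-contained sketch of the translation helper as it reads after this commit: the public Helsinki-NLP/opus-mt-ko-en checkpoint is loaded without any HF_TOKEN, and translate_if_korean only calls the translator when the input contains Hangul jamo (U+3131 to U+3163) or precomposed syllables (U+AC00 to U+D7A3). The FLUX/CLIPSliderFlux setup is omitted and the sample inputs are illustrative, not part of the commit.

# Sketch of the post-commit translation path (no token argument), assuming
# the Helsinki-NLP/opus-mt-ko-en checkpoint stays publicly downloadable.
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def translate_if_korean(text):
    # Translate only when the text contains Hangul jamo or syllables.
    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
        return translator(text)[0]['translation_text']
    return text

print(translate_if_korean("시드"))   # Korean input is routed through the opus-mt model
print(translate_if_korean("Seed"))  # non-Korean input passes through unchanged

Keeping the Hangul check in front of the pipeline call means English prompts never pay the translation cost; only Korean text is rewritten to English before use.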