fantaxy committed
Commit 586370e · verified · 1 Parent(s): 494bc3a

Update app.py

Files changed (1):
  1. app.py +32 -21
app.py CHANGED
@@ -70,9 +70,6 @@ os.environ["HF_HOME"] = cache_path
 # CUDA settings
 torch.backends.cuda.matmul.allow_tf32 = True
 
-# Initialize the translator
-translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
-
 # Create directories
 for dir_path in [gallery_path, video_gallery_path]:
     if not path.exists(dir_path):
@@ -110,9 +107,26 @@ def filter_prompt(prompt):
 
 def process_prompt(prompt):
     """Preprocess the prompt (translation and filtering)"""
-    translated_prompt = translate_if_korean(prompt)
-    is_safe, filtered_prompt = filter_prompt(translated_prompt)
-    return is_safe, filtered_prompt
+    # Check whether the prompt contains Korean (Hangul syllables)
+    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
+        # Translate Korean to English
+        translated = translator(prompt)[0]['translation_text']
+        prompt = translated
+
+    # Filter inappropriate content
+    inappropriate_keywords = [
+        "nude", "naked", "nsfw", "porn", "sex", "explicit", "adult", "xxx",
+        "erotic", "sensual", "seductive", "provocative", "intimate",
+        "violence", "gore", "blood", "death", "kill", "murder", "torture",
+        "drug", "suicide", "abuse", "hate", "discrimination"
+    ]
+
+    prompt_lower = prompt.lower()
+    for keyword in inappropriate_keywords:
+        if keyword in prompt_lower:
+            return False, "The prompt contains inappropriate content."
+
+    return True, prompt
 
 class timer:
     def __init__(self, method_name="timed process"):
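For reference, a usage sketch of the rewritten function above (assuming process_prompt and the module-level translator are in scope; the exact translation text depends on the opus-mt-ko-en model):

# Korean input is detected via the Hangul syllable range, translated, then filtered.
is_safe, result = process_prompt("해변을 걷는 고양이")
print(is_safe, result)   # True, an English translation such as "a cat walking on the beach"

# A blocked keyword short-circuits with the rejection message.
is_safe, result = process_prompt("nude portrait")
print(is_safe, result)   # False, "The prompt contains inappropriate content."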
@@ -311,14 +325,6 @@ def load_gallery():
 # Initialize the Korean-English translator
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
-torch.backends.cuda.matmul.allow_tf32 = False
-torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
-torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
-torch.backends.cudnn.allow_tf32 = False
-torch.backends.cudnn.deterministic = False
-torch.backends.cuda.preferred_blas_library="cublas"
-torch.set_float32_matmul_precision("highest")
-
 MAX_SEED = np.iinfo(np.int32).max
 
 # Load Hugging Face token if needed
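The transformers translation pipeline kept here returns a list of dicts, which is why the calling code indexes [0]['translation_text']. A quick illustration (the exact output string may vary by model version):

from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
out = translator("고양이")           # -> roughly [{'translation_text': 'Cat'}]
print(out[0]['translation_text'])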
@@ -348,13 +354,16 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path).to(torch.device("cuda:0"))
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 
-def process_prompt(prompt):
-    # Check whether the prompt contains Korean
-    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
-        # Translate Korean to English
-        translated = translator(prompt)[0]['translation_text']
-        return translated
-    return prompt
+# Use a single, consistent CUDA configuration
+torch.backends.cuda.matmul.allow_tf32 = False
+torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
+torch.backends.cudnn.allow_tf32 = False
+torch.backends.cudnn.deterministic = False
+torch.backends.cuda.preferred_blas_library = "cublas"
+torch.set_float32_matmul_precision("highest")
+
+
 
 def compute_clip_embedding(text=None):
     inputs = clip_processor(text=text, return_tensors="pt", padding=True).to(device)
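These flags trade speed for reproducibility: turning off TF32 and the reduced-precision FP16/BF16 reductions forces matmuls and cuDNN convolutions to run in full FP32, and set_float32_matmul_precision("highest") is the newer-API equivalent of allow_tf32 = False. A small sanity-check sketch, assuming a recent PyTorch build (preferred_blas_library needs PyTorch 2.1+):

import torch

print(torch.backends.cuda.matmul.allow_tf32)   # False -> matmuls run in full FP32
print(torch.backends.cudnn.allow_tf32)         # False -> cuDNN convolutions in full FP32
print(torch.get_float32_matmul_precision())    # "highest", same effect as allow_tf32 = False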
@@ -590,6 +599,8 @@ css = """
 [previous CSS code kept unchanged]
 """
 
+
+
 # Create the Gradio interface
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     gr.HTML('<div class="title">AI Image & Video Generator</div>')
 
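A side note on the Korean-detection test used in process_prompt: modern Korean text is written with precomposed Hangul syllables, which occupy one contiguous Unicode block, so a single range comparison suffices. A standalone sketch:

# Hangul syllables form one contiguous block: U+AC00 ('가') .. U+D7A3 ('힣').
def contains_hangul(text: str) -> bool:
    return any('가' <= ch <= '힣' for ch in text)

print(hex(ord('가')), hex(ord('힣')))   # 0xac00 0xd7a3
print(contains_hangul("hello"))         # False
print(contains_hangul("고양이 cat"))    # True
# Note: this covers composed syllables only, not the Jamo blocks
# (U+1100-U+11FF), which is fine for typical user input.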