openfree commited on
Commit
0aa641f
·
verified ·
1 Parent(s): 9a70f56

Delete app-backup.py

Browse files
Files changed (1) hide show
  1. app-backup.py +0 -768
app-backup.py DELETED
@@ -1,768 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- import os
4
- import re
5
- import tempfile
6
- from collections.abc import Iterator
7
- from threading import Thread
8
- import json
9
- import requests
10
- import cv2
11
- import gradio as gr
12
- import spaces
13
- import torch
14
- from loguru import logger
15
- from PIL import Image
16
- from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
17
-
18
- # CSV/TXT 분석
19
- import pandas as pd
20
- # PDF 텍스트 추출
21
- import PyPDF2
22
-
23
##############################################################################
# SERPHouse API key from environment variable
##############################################################################
# Empty string when unset; do_web_search() will then fail with an HTTP error
# that is caught and returned as a message rather than raised.
SERPHOUSE_API_KEY = os.getenv("SERPHOUSE_API_KEY", "")
27
-
28
- ##############################################################################
29
- # 간단한 키워드 추출 함수 (한글 + 알파벳 + 숫자 + 공백 보존)
30
- ##############################################################################
31
def extract_keywords(text: str, top_k: int = 5) -> str:
    """Derive a simple search query from free-form text.

    Keeps only Korean syllables (가-힣), ASCII letters, digits and whitespace,
    splits on whitespace, and returns at most ``top_k`` tokens joined by
    single spaces.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9가-힣\s]", "", text)
    return " ".join(cleaned.split()[:top_k])
41
-
42
- ##############################################################################
43
- # SerpHouse Live endpoint 호출
44
- # - 상위 20개 결과 JSON을 LLM에 넘길 때 link, snippet 등 모두 포함
45
- ##############################################################################
46
def do_web_search(query: str) -> str:
    """Query the SerpHouse live endpoint and return up to 20 organic results.

    Results are formatted as plain text (title / link / snippet per item) for
    injection into the system prompt. On any failure a human-readable error
    string is returned instead of raising.
    """
    try:
        url = "https://api.serphouse.com/serp/live"

        # Simple GET request, capped at 20 results.
        params = {
            "q": query,
            "domain": "google.com",
            "serp_type": "web",
            "device": "desktop",
            "lang": "en",
            "num": "20",
        }
        headers = {"Authorization": f"Bearer {SERPHOUSE_API_KEY}"}

        logger.info(f"SerpHouse API 호출 중... 검색어: {query}")
        logger.info(f"요청 URL: {url} - 파라미터: {params}")

        response = requests.get(url, headers=headers, params=params, timeout=30)
        response.raise_for_status()

        logger.info(f"SerpHouse API 응답 상태 코드: {response.status_code}")
        data = response.json()

        # The API has been observed returning "organic" at several depths;
        # probe the known locations in turn.
        results = data.get("results", {})
        organic = None
        if isinstance(results, dict) and "organic" in results:
            # Shape 1: results.organic
            organic = results["organic"]
        elif isinstance(results, dict) and "results" in results:
            # Shape 2: results.results.organic (nested)
            inner = results["results"]
            if isinstance(inner, dict) and "organic" in inner:
                organic = inner["organic"]
        elif "organic" in data:
            # Shape 3: top-level organic
            organic = data["organic"]

        if not organic:
            logger.warning("응답에서 organic 결과를 찾을 수 없습니다.")
            logger.debug(f"응답 구조: {list(data.keys())}")
            if isinstance(results, dict):
                logger.debug(f"results 구조: {list(results.keys())}")
            return "No web search results found or unexpected API response structure."

        # Keep at most 20 items and emit only the fields the LLM needs.
        limited = organic[: min(20, len(organic))]
        summary_lines = [
            f"Result {idx}:\n"
            f"- Title: {item.get('title', 'No title')}\n"
            f"- Link: {item.get('link', '#')}\n"
            f"- Snippet: {item.get('snippet', 'No description')}\n"
            for idx, item in enumerate(limited, start=1)
        ]

        logger.info(f"검색 결과 {len(limited)}개 처리 완료")
        return "\n".join(summary_lines)

    except Exception as e:
        logger.error(f"Web search failed: {e}")
        return f"Web search failed: {str(e)}"
127
-
128
-
129
- ##############################################################################
130
- # 모델/프로세서 로딩
131
- ##############################################################################
132
# Hard cap on characters per text item sent to the model (prompt safety).
MAX_CONTENT_CHARS = 4000
# Model repo; overridable via the MODEL_ID environment variable.
model_id = os.getenv("MODEL_ID", "VIDraft/Gemma3-R1945-27B")

# Left padding so generation continues cleanly from the prompt in batches.
processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id,
    device_map="auto",              # shard across available accelerators
    torch_dtype=torch.bfloat16,
    attn_implementation="eager"     # NOTE(review): eager attention; presumably chosen for Gemma3 compatibility — confirm
)
# Maximum number of images accepted across a conversation (env-overridable).
MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "5"))
143
-
144
-
145
- ##############################################################################
146
- # CSV, TXT, PDF 분석 함수
147
- ##############################################################################
148
def analyze_csv_file(path: str) -> str:
    """Render a CSV file as a labelled text table.

    The table is capped at 50 rows x 10 columns and the rendered string at
    MAX_CONTENT_CHARS characters. Read errors produce an error-message string
    instead of raising.
    """
    try:
        frame = pd.read_csv(path)
        # Trim oversized tables before rendering.
        if frame.shape[0] > 50 or frame.shape[1] > 10:
            frame = frame.iloc[:50, :10]
        rendered = frame.to_string()
        if len(rendered) > MAX_CONTENT_CHARS:
            rendered = rendered[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
        return f"**[CSV File: {os.path.basename(path)}]**\n\n{rendered}"
    except Exception as e:
        return f"Failed to read CSV ({os.path.basename(path)}): {str(e)}"
162
-
163
-
164
def analyze_txt_file(path: str) -> str:
    """Read a UTF-8 text file and return it with a filename header.

    Content is truncated to MAX_CONTENT_CHARS; read errors produce an
    error-message string instead of raising.
    """
    try:
        with open(path, "r", encoding="utf-8") as handle:
            body = handle.read()
        if len(body) > MAX_CONTENT_CHARS:
            body = body[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
        return f"**[TXT File: {os.path.basename(path)}]**\n\n{body}"
    except Exception as e:
        return f"Failed to read TXT ({os.path.basename(path)}): {str(e)}"
176
-
177
-
178
def pdf_to_markdown(pdf_path: str) -> str:
    """Extract text from the first 5 pages of a PDF as simple Markdown.

    Each page becomes a "## Page N" section. Per-page text is capped so the
    pages share MAX_CONTENT_CHARS evenly, and the merged output is capped
    again. Read errors produce an error-message string instead of raising.
    """
    sections: list[str] = []
    try:
        with open(pdf_path, "rb") as handle:
            reader = PyPDF2.PdfReader(handle)
            page_total = len(reader.pages)
            shown = min(5, page_total)
            for idx in range(shown):
                body = (reader.pages[idx].extract_text() or "").strip()
                if body:
                    per_page_cap = MAX_CONTENT_CHARS // shown
                    if len(body) > per_page_cap:
                        body = body[:per_page_cap] + "...(truncated)"
                    sections.append(f"## Page {idx+1}\n\n{body}\n")
            if page_total > shown:
                sections.append(f"\n...(Showing {shown} of {page_total} pages)...")
    except Exception as e:
        return f"Failed to read PDF ({os.path.basename(pdf_path)}): {str(e)}"

    merged = "\n".join(sections)
    if len(merged) > MAX_CONTENT_CHARS:
        merged = merged[:MAX_CONTENT_CHARS] + "\n...(truncated)..."

    return f"**[PDF File: {os.path.basename(pdf_path)}]**\n\n{merged}"
205
-
206
-
207
- ##############################################################################
208
- # 이미지/비디오 업로드 제한 검사
209
- ##############################################################################
210
def count_files_in_new_message(paths: list[str]) -> tuple[int, int]:
    """Count (images, videos) among newly attached file paths.

    A path ending in ".mp4" (case-sensitive) counts as a video; common image
    extensions (case-insensitive) count as images; anything else is ignored.
    """
    videos = sum(1 for p in paths if p.endswith(".mp4"))
    images = sum(
        1
        for p in paths
        if not p.endswith(".mp4")
        and re.search(r"\.(png|jpg|jpeg|gif|webp)$", p, re.IGNORECASE)
    )
    return images, videos
219
-
220
-
221
def count_files_in_history(history: list[dict]) -> tuple[int, int]:
    """Count (images, videos) referenced by prior user turns.

    Only user messages whose content is a non-empty list are inspected, and
    only the first entry of that list is treated as a candidate file path.
    """
    images, videos = 0, 0
    for turn in history:
        # Assistant turns and plain-text user turns carry no files.
        if turn["role"] != "user" or isinstance(turn["content"], str):
            continue
        content = turn["content"]
        if not (isinstance(content, list) and content):
            continue
        candidate = content[0]
        if not isinstance(candidate, str):
            continue
        if candidate.endswith(".mp4"):
            videos += 1
        elif re.search(r"\.(png|jpg|jpeg|gif|webp)$", candidate, re.IGNORECASE):
            images += 1
    return images, videos
235
-
236
-
237
def validate_media_constraints(message: dict, history: list[dict]) -> bool:
    """Enforce upload rules for a new message, counting history as well.

    Rules: at most one video total; no mixing of images and videos; no
    <image> tags alongside a video; at most MAX_NUM_IMAGES images; and the
    number of <image> tags must equal the number of attached images.
    Emits a gr.Warning and returns False on any violation.
    """
    media_paths = [
        f
        for f in message["files"]
        if f.endswith(".mp4") or re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE)
    ]

    new_images, new_videos = count_files_in_new_message(media_paths)
    old_images, old_videos = count_files_in_history(history)
    total_images = old_images + new_images
    total_videos = old_videos + new_videos

    if total_videos > 1:
        gr.Warning("Only one video is supported.")
        return False
    if total_videos == 1:
        if total_images > 0:
            gr.Warning("Mixing images and videos is not allowed.")
            return False
        if "<image>" in message["text"]:
            gr.Warning("Using <image> tags with video files is not supported.")
            return False
    if total_videos == 0 and total_images > MAX_NUM_IMAGES:
        gr.Warning(f"You can upload up to {MAX_NUM_IMAGES} images.")
        return False

    if "<image>" in message["text"]:
        image_files = [
            f
            for f in message["files"]
            if re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE)
        ]
        if message["text"].count("<image>") != len(image_files):
            gr.Warning("The number of <image> tags in the text does not match the number of image files.")
            return False

    return True
270
-
271
-
272
- ##############################################################################
273
- # 비디오 처리
274
- ##############################################################################
275
def downsample_video(video_path: str) -> list[tuple[Image.Image, float]]:
    """Sample up to 5 roughly evenly spaced frames from a video.

    Returns (PIL image, timestamp-in-seconds) pairs. Frames are taken about
    once per second, or every total_frames/10 frames for short-fps videos,
    whichever stride is larger.
    """
    vidcap = cv2.VideoCapture(video_path)
    try:
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Guard against broken container metadata: OpenCV can report 0 fps
        # and/or 0 frames, which in the original code produced a range() step
        # of 0 (ValueError) or a ZeroDivisionError in the timestamp math.
        if total_frames <= 0:
            return []
        if fps <= 0:
            fps = 30.0  # assume a common default so timestamps stay finite
        frame_interval = max(int(fps), int(total_frames / 10), 1)

        frames: list[tuple[Image.Image, float]] = []
        for i in range(0, total_frames, frame_interval):
            vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
            success, image = vidcap.read()
            if success:
                # OpenCV decodes BGR; PIL expects RGB.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                frames.append((Image.fromarray(image), round(i / fps, 2)))
                if len(frames) >= 5:
                    break
        return frames
    finally:
        # Always release the capture handle, even if decoding raises.
        vidcap.release()
295
-
296
-
297
def process_video(video_path: str) -> list[dict]:
    """Convert a video into alternating text/image content entries.

    Each sampled frame contributes a "Frame <timestamp>:" label followed by
    the frame saved as a PNG file reference.
    """
    content: list[dict] = []
    for pil_image, timestamp in downsample_video(video_path):
        # delete=False: the file must outlive this function so the model/UI
        # can read it; cleanup is deferred to the app's delete_cache setting.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            pil_image.save(tmp.name)
            content.append({"type": "text", "text": f"Frame {timestamp}:"})
            content.append({"type": "image", "url": tmp.name})
    logger.debug(f"{content=}")
    return content
308
-
309
-
310
- ##############################################################################
311
- # interleaved <image> 처리
312
- ##############################################################################
313
def process_interleaved_images(message: dict) -> list[dict]:
    """Weave image entries into the text at each <image> tag position.

    Splits message["text"] on <image> tags and consumes the attached image
    files in order, one per tag. Extra tags beyond the available images, and
    empty/whitespace-only fragments, are preserved as text entries (matching
    the original behavior).
    """
    image_paths = [
        f
        for f in message["files"]
        if re.search(r"\.(png|jpg|jpeg|gif|webp)$", f, re.IGNORECASE)
    ]

    content: list[dict] = []
    next_image = 0
    for fragment in re.split(r"(<image>)", message["text"]):
        if fragment == "<image>" and next_image < len(image_paths):
            content.append({"type": "image", "url": image_paths[next_image]})
            next_image += 1
        elif fragment.strip():
            content.append({"type": "text", "text": fragment.strip()})
        elif isinstance(fragment, str) and fragment != "<image>":
            # Keep empty / whitespace-only fragments verbatim.
            content.append({"type": "text", "text": fragment})
    return content
330
-
331
-
332
- ##############################################################################
333
- # PDF + CSV + TXT + 이미지/비디오
334
- ##############################################################################
335
def is_image_file(file_path: str) -> bool:
    """True for paths with a common image extension (case-insensitive)."""
    return re.search(r"\.(png|jpg|jpeg|gif|webp)$", file_path, re.IGNORECASE) is not None

def is_video_file(file_path: str) -> bool:
    """True for .mp4 paths (case-sensitive, matching the upload filter)."""
    return file_path.endswith(".mp4")

def is_document_file(file_path: str) -> bool:
    """True for PDF, CSV, or TXT paths (case-insensitive)."""
    return file_path.lower().endswith((".pdf", ".csv", ".txt"))
347
-
348
-
349
def process_new_user_message(message: dict) -> list[dict]:
    """Build the multimodal content list for a new user turn.

    Order: the raw text first, then CSV/TXT/PDF summaries, then media — a
    video (only the first one, images ignored alongside it), interleaved
    <image> content, or plain image attachments.
    """
    files = message["files"]
    if not files:
        return [{"type": "text", "text": message["text"]}]

    video_paths = [f for f in files if is_video_file(f)]
    image_paths = [f for f in files if is_image_file(f)]
    csv_paths = [f for f in files if f.lower().endswith(".csv")]
    txt_paths = [f for f in files if f.lower().endswith(".txt")]
    pdf_paths = [f for f in files if f.lower().endswith(".pdf")]

    content: list[dict] = [{"type": "text", "text": message["text"]}]

    # Document attachments are summarized into text entries.
    for path in csv_paths:
        content.append({"type": "text", "text": analyze_csv_file(path)})
    for path in txt_paths:
        content.append({"type": "text", "text": analyze_txt_file(path)})
    for path in pdf_paths:
        content.append({"type": "text", "text": pdf_to_markdown(path)})

    # A video takes precedence over any images.
    if video_paths:
        return content + process_video(video_paths[0])

    if "<image>" in message["text"] and image_paths:
        interleaved = process_interleaved_images({"text": message["text"], "files": image_paths})
        # The raw text is already woven into the interleaved content, so drop
        # the duplicate leading text entry.
        if content and content[0]["type"] == "text":
            content = content[1:]
        return interleaved + content

    content.extend({"type": "image", "url": path} for path in image_paths)
    return content
387
-
388
-
389
- ##############################################################################
390
- # history -> LLM 메시지 변환
391
- ##############################################################################
392
def process_history(history: list[dict]) -> list[dict]:
    """Convert Gradio chat history into the processor's message format.

    Consecutive user entries (text and file references) are batched into a
    single user message; assistant entries flush the pending batch first.
    Non-image files are represented by a "[File: name]" text placeholder.
    """
    messages: list[dict] = []
    pending_user: list[dict] = []

    def _flush_user() -> None:
        # Emit accumulated user content as one user turn.
        nonlocal pending_user
        if pending_user:
            messages.append({"role": "user", "content": pending_user})
            pending_user = []

    for item in history:
        if item["role"] == "assistant":
            _flush_user()
            messages.append({
                "role": "assistant",
                "content": [{"type": "text", "text": item["content"]}],
            })
            continue

        content = item["content"]
        if isinstance(content, str):
            pending_user.append({"type": "text", "text": content})
        elif isinstance(content, list) and content:
            file_path = content[0]
            if is_image_file(file_path):
                pending_user.append({"type": "image", "url": file_path})
            else:
                pending_user.append({"type": "text", "text": f"[File: {os.path.basename(file_path)}]"})

    _flush_user()
    return messages
416
-
417
-
418
- ##############################################################################
419
- # 메인 추론 함수 (web search 체크 시 자동 키워드추출->검색->결과 system msg)
420
- ##############################################################################
421
@spaces.GPU(duration=120)
def run(
    message: dict,
    history: list[dict],
    system_prompt: str = "",
    max_new_tokens: int = 512,
    use_web_search: bool = False,
    web_search_query: str = "",
) -> Iterator[str]:
    # Main inference entry point for the ChatInterface.
    #
    # Streams the growing assistant reply as a generator of strings. When
    # use_web_search is on, keywords are auto-extracted from the user text,
    # searched via SerpHouse, and the results are prepended as a system
    # message. web_search_query is accepted but unused (the query is always
    # derived from the message text).

    # Reject messages violating media limits; yield once so the UI clears.
    if not validate_media_constraints(message, history):
        yield ""
        return

    try:
        combined_system_msg = ""

        # Used internally only (not shown in the UI).
        if system_prompt.strip():
            combined_system_msg += f"[System Prompt]\n{system_prompt.strip()}\n\n"

        if use_web_search:
            user_text = message["text"]
            ws_query = extract_keywords(user_text, top_k=5)
            if ws_query.strip():
                logger.info(f"[Auto WebSearch Keyword] {ws_query!r}")
                ws_result = do_web_search(ws_query)
                combined_system_msg += f"[Search top-20 Full Items Based on user prompt]\n{ws_result}\n\n"
                # Instruct the model to cite the search results (links) as sources.
                combined_system_msg += "[참고: 위 검색결과 내용과 link를 출처로 인용하여 답변해 주세요.]\n\n"
            else:
                combined_system_msg += "[No valid keywords found, skipping WebSearch]\n\n"

        messages = []
        if combined_system_msg.strip():
            messages.append({
                "role": "system",
                "content": [{"type": "text", "text": combined_system_msg.strip()}],
            })

        # Prior turns, then the new user turn (text items length-capped).
        messages.extend(process_history(history))

        user_content = process_new_user_message(message)
        for item in user_content:
            if item["type"] == "text" and len(item["text"]) > MAX_CONTENT_CHARS:
                item["text"] = item["text"][:MAX_CONTENT_CHARS] + "\n...(truncated)..."
        messages.append({"role": "user", "content": user_content})

        inputs = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        ).to(device=model.device, dtype=torch.bfloat16)

        # Generation runs on a worker thread; the streamer yields decoded
        # text here as tokens arrive (30 s inter-token timeout).
        streamer = TextIteratorStreamer(processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs = dict(
            inputs,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
        )

        t = Thread(target=_model_gen_with_oom_catch, kwargs=gen_kwargs)
        t.start()

        # Yield the cumulative output so the UI shows a growing message.
        output = ""
        for new_text in streamer:
            output += new_text
            yield output

    except Exception as e:
        logger.error(f"Error in run: {str(e)}")
        yield f"죄송합니다. 오류가 발생했습니다: {str(e)}"
495
-
496
-
497
- ##############################################################################
498
- # [추가] 별도 함수에서 model.generate(...)를 호출, OOM 캐치
499
- ##############################################################################
500
def _model_gen_with_oom_catch(**kwargs):
    """Run model.generate on a worker thread, catching CUDA OOM there.

    Raised from the generation thread so the OOM is converted into a
    user-actionable RuntimeError instead of killing the thread silently.
    """
    try:
        model.generate(**kwargs)
    except torch.cuda.OutOfMemoryError:
        raise RuntimeError(
            "[OutOfMemoryError] GPU 메모리가 부족합니다. "
            "Max New Tokens을 줄이거나, 프롬프트 길이를 줄여주세요."
        )
511
-
512
-
513
##############################################################################
# Example prompts (Korean) shown in the UI example picker
##############################################################################
examples = [
    [
        {
            "text": "두 PDF 파일 내용을 비교하라.",
            "files": [
                "assets/additional-examples/before.pdf",
                "assets/additional-examples/after.pdf",
            ],
        }
    ],
    [
        {
            "text": "CSV 파일 내용을 요약, 분석하라",
            "files": ["assets/additional-examples/sample-csv.csv"],
        }
    ],
    [
        {
            "text": "이 영상의 내용을 설명하라",
            "files": ["assets/additional-examples/tmp.mp4"],
        }
    ],
    [
        {
            "text": "표지 내용을 설명하고 글자를 읽어주세요.",
            "files": ["assets/additional-examples/maz.jpg"],
        }
    ],
    [
        {
            "text": "이미 이 영양제를 <image> 가지고 있고, 이 제품 <image>을 새로 사려 합니다. 함께 섭취할 때 주의해야 할 점이 있을까요?",
            "files": ["assets/additional-examples/pill1.png", "assets/additional-examples/pill2.png"],
        }
    ],
    [
        {
            "text": "이 적분을 풀어주세요.",
            "files": ["assets/additional-examples/4.png"],
        }
    ],
    [
        {
            "text": "이 티켓은 언제 발급된 것이고, 가격은 얼마인가요?",
            "files": ["assets/additional-examples/2.png"],
        }
    ],
    [
        {
            "text": "이미지들의 순서를 바탕으로 짧은 이야기를 만들어 주세요.",
            "files": [
                "assets/sample-images/09-1.png",
                "assets/sample-images/09-2.png",
                "assets/sample-images/09-3.png",
                "assets/sample-images/09-4.png",
                "assets/sample-images/09-5.png",
            ],
        }
    ],

    [
        {
            "text": "동일한 막대 그래프를 그리는 matplotlib 코드를 작성해주세요.",
            "files": ["assets/additional-examples/barchart.png"],
        }
    ],
    [
        {
            "text": "이미지에 있는 텍스트를 그대로 읽어서 마크다운 형태로 적어주세요.",
            "files": ["assets/additional-examples/3.png"],
        }
    ],
    [
        {
            "text": "이 표지판에는 무슨 문구가 적혀 있나요?",
            "files": ["assets/sample-images/02.png"],
        }
    ],
    [
        {
            "text": "두 이미지를 비교해서 공통점과 차이점을 말해주세요.",
            "files": ["assets/sample-images/03.png"],
        }
    ],
    [
        {
            "text": "너는 친근하고 다정한 이해심 많은 여자친구 역할이다.",
        }
    ],
    [
        {
            "text": """인류의 마지막 시험(Humanity's Last Exam) 문제를 풀이하라('Deep Research' 버튼 클릭할것) Which was the first statute in the modern State of Israel to explicitly introduce the concept of "good faith"? (Do not append "the" or the statute's year to the answer.)""",
        }
    ],
    [
        {
            "text": """인류의 마지막 시험(Humanity's Last Exam) 문제를 풀이하라. How does Guarani's nominal tense/aspect system interact with effected objects in sentences?

Answer Choices:
A. Effected objects cannot take nominal tense/aspect markers
B. Effected objects require the post-stative -kue
C. Effected objects must be marked with the destinative -rã
D. Nominal tense/aspect is optional for effected objects
E. Effected objects use a special set of tense/aspect markers""",
        }
    ],
]
622
-
623
-
624
##############################################################################
# Gradio UI (Blocks) layout: full-width chat, no left sidebar
##############################################################################
# Custom stylesheet injected into gr.Blocks (CSS comments inside the string
# are part of the runtime content and left as-is).
css = """
/* 1) UI를 처음부터 가장 넓게 (width 100%) 고정하여 표시 */
.gradio-container {
    background: rgba(255, 255, 255, 0.95);
    border-radius: 15px;
    padding: 30px 40px;
    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.3);
    margin: 20px auto; /* 위아래 여백만 유지 */
    width: 100% !important;
    max-width: none !important; /* 1200px 제한 제거 */
}

.fillable {
    width: 100% !important;
    max-width: 100% !important;
}

/* 2) 배경을 연하고 투명한 파스텔 톤 그라디언트로 변경 */
body {
    background: linear-gradient(
        135deg,
        rgba(255, 229, 210, 0.6),
        rgba(255, 240, 245, 0.6)
    );
    margin: 0;
    padding: 0;
    font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
    color: #333;
}

/* 버튼 색상도 기존의 짙은 붉은-주황 → 파스텔 계열로 연하게 */
button, .btn {
    background: linear-gradient(
        90deg,
        rgba(255, 210, 220, 0.7),
        rgba(255, 190, 200, 0.7)
    ) !important;
    border: none;
    color: #333; /* 글자 잘 보이도록 약간 진한 글씨 */
    padding: 12px 24px;
    text-transform: uppercase;
    font-weight: bold;
    letter-spacing: 1px;
    border-radius: 5px;
    cursor: pointer;
    transition: transform 0.2s ease-in-out;
}

button:hover, .btn:hover {
    transform: scale(1.03);
}

#examples_container {
    margin: auto;
    width: 90%;
}

#examples_row {
    justify-content: center;
}
"""
688
-
689
# Static HTML header rendered above the chat interface.
title_html = """
<h1 align="center" style="margin-bottom: 0.2em; font-size: 1.6em;"> 🤗 Gemma3-uncensored-R27B </h1>
<p align="center" style="font-size:1.1em; color:#555;">
    ✅Agentic AI Platform ✅Reasoning & Uncensored ✅Multimodal & VLM ✅Deep-Research & RAG <br>
    Operates on an ✅'NVIDIA A100 GPU' as an independent local server, enhancing security and preventing information leakage.<br>
    @Based by 'MS Gemma-3-27b' / @Powered by 'MOUSE-II'(VIDRAFT)
</p>
"""
697
-
698
with gr.Blocks(css=css, title="Gemma3-uncensored-R27B") as demo:
    gr.Markdown(title_html)

    # The web-search toggle is visible; the system prompt and token slider
    # below are wired in as hidden additional inputs.
    web_search_checkbox = gr.Checkbox(
        label="Deep Research",
        value=False
    )

    # Used internally but not exposed in the UI.
    system_prompt_box = gr.Textbox(
        lines=3,
        value="반드시 한글로 답변하라. You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. Please answer in Korean.You have the ability to read English sources, but you **must always speak in Korean**.Even if the search results are in English, answer in Korean.",
        visible=False  # hidden from the UI
    )

    max_tokens_slider = gr.Slider(
        label="Max New Tokens",
        minimum=100,
        maximum=8000,
        step=50,
        value=1000,
        visible=False  # hidden from the UI
    )

    # Accepted by run() but unused there (kept for signature compatibility).
    web_search_text = gr.Textbox(
        lines=1,
        label="(Unused) Web Search Query",
        placeholder="No direct input needed",
        visible=False  # hidden from the UI
    )

    # Chat interface wiring: run() receives the hidden controls as
    # additional inputs, in the order declared below.
    chat = gr.ChatInterface(
        fn=run,
        type="messages",
        chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["image"]),
        textbox=gr.MultimodalTextbox(
            file_types=[
                ".webp", ".png", ".jpg", ".jpeg", ".gif",
                ".mp4", ".csv", ".txt", ".pdf"
            ],
            file_count="multiple",
            autofocus=True
        ),
        multimodal=True,
        additional_inputs=[
            system_prompt_box,
            max_tokens_slider,
            web_search_checkbox,
            web_search_text,
        ],
        stop_btn=False,
        title='<a href="https://discord.gg/openfreeai" target="_blank">https://discord.gg/openfreeai</a>',
        examples=examples,
        run_examples_on_click=False,
        cache_examples=False,
        css_paths=None,
        delete_cache=(1800, 1800),
    )

    # Examples are already attached to the ChatInterface above; this row only
    # renders a caption.
    with gr.Row(elem_id="examples_row"):
        with gr.Column(scale=12, elem_id="examples_container"):
            gr.Markdown("### Example Inputs (click to load)")
763
-
764
-
765
if __name__ == "__main__":
    # Local execution only (Hugging Face Spaces launches the app itself).
    demo.launch()
768
-