# NOTE: the "Spaces: Sleeping" lines that used to sit here were Hugging Face
# Spaces page chrome accidentally pasted into the source (not Python code);
# replaced with this comment so the module is importable.
# Standard library
import base64
import hashlib
import hmac
import logging
import os
import re
import tempfile
import time
import urllib.parse  # used to resolve the blog iframe path against the page URL

# Third-party
import gradio as gr
import mecab  # python-mecab-ko library
import pandas as pd
import requests
from bs4 import BeautifulSoup
# Lightweight console tracer used throughout the app.
def debug_log(message: str) -> None:
    """Emit a trace line to stdout with a [DEBUG] prefix."""
    print(f"[DEBUG] {message}")
# --- Naver blog scraping ---
def scrape_naver_blog(url: str) -> str:
    """Scrape the title and body text of a Naver blog post.

    Naver blog pages embed the real post inside an ``iframe#mainFrame``;
    this fetches the outer page, resolves the iframe URL, fetches it, and
    extracts the SmartEditor title/body containers.

    Fix: both HTTP requests now pass ``timeout=10`` — without a timeout a
    stalled connection would hang the Gradio callback indefinitely.

    Args:
        url: Full URL of a Naver blog post.

    Returns:
        Combined title/body text, or a (Korean) error-message string on any
        failure; this function never raises to the caller.
    """
    debug_log("scrape_naver_blog ํจ์ ์์")
    debug_log(f"์์ฒญ๋ฐ์ URL: {url}")
    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/96.0.4664.110 Safari/537.36"
        )
    }
    try:
        # timeout added: a hung socket must not block the UI forever
        response = requests.get(url, headers=headers, timeout=10)
        debug_log("HTTP GET ์์ฒญ(๋ฉ์ธ ํ์ด์ง) ์๋ฃ")
        if response.status_code != 200:
            debug_log(f"์์ฒญ ์คํจ, ์ํ์ฝ๋: {response.status_code}")
            return f"์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค. ์ํ์ฝ๋: {response.status_code}"
        soup = BeautifulSoup(response.text, "html.parser")
        debug_log("HTML ํ์ฑ(๋ฉ์ธ ํ์ด์ง) ์๋ฃ")
        iframe = soup.select_one("iframe#mainFrame")
        if not iframe:
            debug_log("iframe#mainFrame ํ๊ทธ๋ฅผ ์ฐพ์ ์ ์์ต๋๋ค.")
            return "๋ณธ๋ฌธ iframe์ ์ฐพ์ ์ ์์ต๋๋ค."
        iframe_src = iframe.get("src")
        if not iframe_src:
            debug_log("iframe src๊ฐ ์กด์ฌํ์ง ์์ต๋๋ค.")
            return "๋ณธ๋ฌธ iframe์ src๋ฅผ ์ฐพ์ ์ ์์ต๋๋ค."
        # src may be relative; resolve it against the original page URL.
        parsed_iframe_url = urllib.parse.urljoin(url, iframe_src)
        debug_log(f"iframe ํ์ด์ง ์์ฒญ URL: {parsed_iframe_url}")
        iframe_response = requests.get(parsed_iframe_url, headers=headers, timeout=10)
        debug_log("HTTP GET ์์ฒญ(iframe ํ์ด์ง) ์๋ฃ")
        if iframe_response.status_code != 200:
            debug_log(f"iframe ์์ฒญ ์คํจ, ์ํ์ฝ๋: {iframe_response.status_code}")
            return f"iframe์์ ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค. ์ํ์ฝ๋: {iframe_response.status_code}"
        iframe_soup = BeautifulSoup(iframe_response.text, "html.parser")
        debug_log("HTML ํ์ฑ(iframe ํ์ด์ง) ์๋ฃ")
        # SmartEditor ONE containers for title and body.
        title_div = iframe_soup.select_one('.se-module.se-module-text.se-title-text')
        title = title_div.get_text(strip=True) if title_div else "์ ๋ชฉ์ ์ฐพ์ ์ ์์ต๋๋ค."
        debug_log(f"์ถ์ถ๋ ์ ๋ชฉ: {title}")
        content_div = iframe_soup.select_one('.se-main-container')
        if content_div:
            content = content_div.get_text("\n", strip=True)
        else:
            content = "๋ณธ๋ฌธ์ ์ฐพ์ ์ ์์ต๋๋ค."
        debug_log("๋ณธ๋ฌธ ์ถ์ถ ์๋ฃ")
        result = f"[์ ๋ชฉ]\n{title}\n\n[๋ณธ๋ฌธ]\n{content}"
        debug_log("์ ๋ชฉ๊ณผ ๋ณธ๋ฌธ ํฉ์นจ ์๋ฃ")
        return result
    except Exception as e:
        debug_log(f"์๋ฌ ๋ฐ์: {str(e)}")
        return f"์คํฌ๋ํ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค: {str(e)}"
# --- Morphological analysis (reference code 1) ---
def analyze_text(text: str):
    """Count noun frequencies in *text* with MeCab (python-mecab-ko).

    Non-Hangul characters are stripped first (NOTE(review): this also
    removes spaces, concatenating adjacent words before analysis — looks
    intentional for this keyword tool, but confirm). Only tokens whose POS
    tag starts with "NN" (nouns) are counted.

    Fix: the temporary .xlsx file is now closed *before* pandas writes to
    its path — writing through a still-open NamedTemporaryFile fails on
    Windows, where the open handle locks the file.

    Returns:
        tuple: (DataFrame sorted by frequency desc, path of an .xlsx
        export). On input with no Hangul, an empty DataFrame and "".
    """
    logging.basicConfig(level=logging.DEBUG)  # no-op if logging is already configured
    logger = logging.getLogger(__name__)
    logger.debug("์๋ณธ ํ ์คํธ: %s", text)
    filtered_text = re.sub(r'[^๊ฐ-ํฃ]', '', text)
    logger.debug("ํํฐ๋ง๋ ํ ์คํธ: %s", filtered_text)
    if not filtered_text:
        logger.debug("์ ํจํ ํ๊ตญ์ด ํ ์คํธ๊ฐ ์์.")
        return pd.DataFrame(columns=["๋จ์ด", "๋น๋์"]), ""
    mecab_instance = mecab.MeCab()
    tokens = mecab_instance.pos(filtered_text)
    logger.debug("ํํ์ ๋ถ์ ๊ฒฐ๊ณผ: %s", tokens)
    freq = {}
    for word, pos in tokens:
        if word and word.strip() and pos.startswith("NN"):
            freq[word] = freq.get(word, 0) + 1
            logger.debug("๋จ์ด: %s, ํ์ฌ: %s, ๋น๋: %d", word, pos, freq[word])
    sorted_freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
    logger.debug("์ ๋ ฌ๋ ๋จ์ด ๋น๋: %s", sorted_freq)
    df = pd.DataFrame(sorted_freq, columns=["๋จ์ด", "๋น๋์"])
    logger.debug("ํํ์ ๋ถ์ DataFrame ์์ฑ๋จ, shape: %s", df.shape)
    # Reserve a unique path, close the handle, then let pandas reopen it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx") as tmp:
        excel_path = tmp.name
    df.to_excel(excel_path, index=False, engine='openpyxl')
    logger.debug("Excel ํ์ผ ์์ฑ๋จ: %s", excel_path)
    return df, excel_path
# --- ๋ค์ด๋ฒ ๊ฒ์ ๋ฐ ๊ด๊ณ API ๊ด๋ จ (์ฐธ์กฐ์ฝ๋-2) --- | |
def generate_signature(timestamp, method, uri, secret_key): | |
message = f"{timestamp}.{method}.{uri}" | |
digest = hmac.new(secret_key.encode("utf-8"), message.encode("utf-8"), hashlib.sha256).digest() | |
return base64.b64encode(digest).decode() | |
def get_header(method, uri, api_key, secret_key, customer_id):
    """Build the authentication headers for a Naver Ads API request.

    Signs the current millisecond timestamp together with *method* and
    *uri* using *secret_key*.
    """
    now_ms = str(round(time.time() * 1000))
    return {
        "Content-Type": "application/json; charset=UTF-8",
        "X-Timestamp": now_ms,
        "X-API-KEY": api_key,
        "X-Customer": str(customer_id),
        "X-Signature": generate_signature(now_ms, method, uri, secret_key),
    }
def fetch_related_keywords(keyword):
    """Query the Naver Ads keywordstool API for keywords related to *keyword*.

    Requires NAVER_API_KEY / NAVER_SECRET_KEY / NAVER_CUSTOMER_ID env vars.

    Fixes: adds a request timeout, guards non-200 responses before calling
    ``.json()``, and narrows ``parse_count``'s bare ``except:`` (which would
    also swallow KeyboardInterrupt/SystemExit) to ValueError.

    Returns:
        DataFrame with columns [์ ๋ณดํค์๋, PC์๊ฒ์๋, ๋ชจ๋ฐ์ผ์๊ฒ์๋,
        ํ ํ์๊ฒ์๋] (at most 100 rows), or an empty DataFrame when the
        API yields no usable data.
    """
    debug_log(f"fetch_related_keywords ํธ์ถ, ํค์๋: {keyword}")
    API_KEY = os.environ["NAVER_API_KEY"]
    SECRET_KEY = os.environ["NAVER_SECRET_KEY"]
    CUSTOMER_ID = os.environ["NAVER_CUSTOMER_ID"]
    BASE_URL = "https://api.naver.com"
    uri = "/keywordstool"
    method = "GET"
    headers = get_header(method, uri, API_KEY, SECRET_KEY, CUSTOMER_ID)
    params = {
        "hintKeywords": [keyword],
        "showDetail": "1"
    }
    # timeout added so a stalled API call cannot hang the app
    response = requests.get(BASE_URL + uri, params=params, headers=headers, timeout=10)
    if response.status_code != 200:
        # Guard before .json(): an error page would raise a decode error.
        debug_log(f"fetch_related_keywords ์ค๋ฅ, ์ํ์ฝ๋: {response.status_code}")
        return pd.DataFrame()
    data = response.json()
    if "keywordList" not in data:
        return pd.DataFrame()
    df = pd.DataFrame(data["keywordList"])
    if len(df) > 100:
        df = df.head(100)

    def parse_count(x):
        # Non-numeric volume values (presumably strings like "< 10" from
        # the API — TODO confirm) fall back to 0.
        try:
            return int(str(x).replace(",", ""))
        except ValueError:  # was a bare except; narrowed to the actual failure
            return 0

    df["PC์๊ฒ์๋"] = df["monthlyPcQcCnt"].apply(parse_count)
    df["๋ชจ๋ฐ์ผ์๊ฒ์๋"] = df["monthlyMobileQcCnt"].apply(parse_count)
    df["ํ ํ์๊ฒ์๋"] = df["PC์๊ฒ์๋"] + df["๋ชจ๋ฐ์ผ์๊ฒ์๋"]
    df.rename(columns={"relKeyword": "์ ๋ณดํค์๋"}, inplace=True)
    result_df = df[["์ ๋ณดํค์๋", "PC์๊ฒ์๋", "๋ชจ๋ฐ์ผ์๊ฒ์๋", "ํ ํ์๊ฒ์๋"]]
    debug_log("fetch_related_keywords ์๋ฃ")
    return result_df
def fetch_blog_count(keyword):
    """Return the total number of Naver blog posts matching *keyword*.

    Uses the Naver OpenAPI blog-search endpoint; requires
    NAVER_SEARCH_CLIENT_ID / NAVER_SEARCH_CLIENT_SECRET env vars.

    Fix: adds ``timeout=10`` — without it a stalled API call would hang
    the per-keyword lookup loop in process_keyword.

    Returns:
        int: the API's "total" field, or 0 on any non-200 response.
    """
    debug_log(f"fetch_blog_count ํธ์ถ, ํค์๋: {keyword}")
    client_id = os.environ["NAVER_SEARCH_CLIENT_ID"]
    client_secret = os.environ["NAVER_SEARCH_CLIENT_SECRET"]
    url = "https://openapi.naver.com/v1/search/blog.json"
    headers = {
        "X-Naver-Client-Id": client_id,
        "X-Naver-Client-Secret": client_secret
    }
    params = {"query": keyword, "display": 1}
    response = requests.get(url, headers=headers, params=params, timeout=10)
    if response.status_code == 200:
        data = response.json()
        debug_log(f"fetch_blog_count ๊ฒฐ๊ณผ: {data.get('total', 0)}")
        return data.get("total", 0)
    else:
        debug_log(f"fetch_blog_count ์ค๋ฅ, ์ํ์ฝ๋: {response.status_code}")
        return 0
def create_excel_file(df):
    """Write *df* to a fresh temporary .xlsx file and return its path.

    The temp file handle is closed first; pandas then reopens the path
    itself, and the file is intentionally not auto-deleted so Gradio can
    serve it for download.
    """
    tmp = tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False)
    tmp.close()
    df.to_excel(tmp.name, index=False)
    debug_log(f"Excel ํ์ผ ์์ฑ๋จ: {tmp.name}")
    return tmp.name
def process_keyword(keywords: str, include_related: bool):
    """Look up search volumes and blog-post counts for newline-separated keywords.

    For each input keyword the exact-match row from the Naver keyword tool
    is collected (falling back to the tool's first returned row); related
    keywords are additionally appended for the first input keyword only
    when *include_related* is True. Duplicate ์ ๋ณดํค์๋ rows are dropped.

    Returns:
        tuple: (DataFrame sorted by ํ ํ์๊ฒ์๋ desc, .xlsx export path).
    """
    debug_log(f"process_keyword ํธ์ถ, ํค์๋๋ค: {keywords}, ์ฐ๊ด๊ฒ์์ด ํฌํจ: {include_related}")
    targets = [line.strip() for line in keywords.splitlines() if line.strip()]
    frames = []
    for position, target in enumerate(targets):
        fetched = fetch_related_keywords(target)
        if fetched.empty:
            continue
        exact = fetched[fetched["์ ๋ณดํค์๋"] == target]
        frames.append(exact if not exact.empty else fetched.head(1))
        if include_related and position == 0:
            # Only the first keyword contributes its related keywords.
            others = fetched[fetched["์ ๋ณดํค์๋"] != target]
            if not others.empty:
                frames.append(others)
    if frames:
        combined = pd.concat(frames, ignore_index=True)
        combined.drop_duplicates(subset=["์ ๋ณดํค์๋"], inplace=True)
    else:
        combined = pd.DataFrame(columns=["์ ๋ณดํค์๋", "PC์๊ฒ์๋", "๋ชจ๋ฐ์ผ์๊ฒ์๋", "ํ ํ์๊ฒ์๋"])
    combined["๋ธ๋ก๊ทธ๋ฌธ์์"] = combined["์ ๋ณดํค์๋"].apply(fetch_blog_count)
    combined.sort_values(by="ํ ํ์๊ฒ์๋", ascending=False, inplace=True)
    debug_log("process_keyword ์๋ฃ")
    return combined, create_excel_file(combined)
# --- Merge morphological analysis with search volume / blog counts ---
def morphological_analysis_and_enrich(text: str, remove_freq1: bool):
    """Run noun-frequency analysis on *text*, then enrich each word with
    Naver search-volume and blog-post-count data.

    When *remove_freq1* is True, words occurring exactly once are dropped
    before the API lookups.

    Returns:
        tuple: (merged DataFrame, .xlsx export path); the path is "" when
        the analysis produced no words.
    """
    debug_log("morphological_analysis_and_enrich ํจ์ ์์")
    df_freq, _ = analyze_text(text)
    if df_freq.empty:
        debug_log("ํํ์ ๋ถ์ ๊ฒฐ๊ณผ๊ฐ ๋น ๋ฐ์ดํฐํ๋ ์์ ๋๋ค.")
        return df_freq, ""
    if remove_freq1:
        before_shape = df_freq.shape
        df_freq = df_freq[df_freq["๋น๋์"] != 1]
        debug_log(f"๋น๋์ 1 ์ ๊ฑฐ ์ ์ฉ๋จ. {before_shape} -> {df_freq.shape}")
    keyword_lines = "\n".join(df_freq["๋จ์ด"].tolist())
    debug_log(f"๋ถ์๋ ํค์๋: {keyword_lines}")
    df_keyword_info, _ = process_keyword(keyword_lines, include_related=False)
    debug_log("๊ฒ์๋ ๋ฐ ๋ธ๋ก๊ทธ๋ฌธ์์ ์กฐํ ์๋ฃ")
    # Left-join keeps every analyzed word even when the API had no data.
    enriched = df_freq.merge(df_keyword_info, how="left", left_on="๋จ์ด", right_on="์ ๋ณดํค์๋")
    enriched.drop(columns=["์ ๋ณดํค์๋"], inplace=True)
    excel_path = create_excel_file(enriched)
    debug_log("morphological_analysis_and_enrich ํจ์ ์๋ฃ")
    return enriched, excel_path
# --- Direct keyword analysis (standalone) ---
def direct_keyword_analysis(text: str, keyword_input: str):
    """Count raw substring occurrences of user-supplied keywords in *text*.

    *keyword_input* may separate keywords with newlines or commas.

    Returns:
        tuple: (DataFrame with columns [ํค์๋, ๋น๋์], .xlsx export path).
    """
    debug_log("direct_keyword_analysis ํจ์ ์์")
    raw_terms = re.split(r'[\n,]+', keyword_input)
    keywords = [term.strip() for term in raw_terms if term.strip()]
    debug_log(f"์ ๋ ฅ๋ ํค์๋ ๋ชฉ๋ก: {keywords}")
    rows = []
    for kw in keywords:
        count = text.count(kw)
        rows.append((kw, count))
        debug_log(f"ํค์๋ '{kw}'์ ๋น๋์: {count}")
    df = pd.DataFrame(rows, columns=["ํค์๋", "๋น๋์"])
    excel_path = create_excel_file(df)
    debug_log("direct_keyword_analysis ํจ์ ์๋ฃ")
    return df, excel_path
# --- Combined analysis (morphological analysis + direct keyword analysis) ---
def combined_analysis(blog_text: str, remove_freq1: bool, direct_keyword_input: str):
    """Merge morphological-analysis results with user-entered direct keywords.

    Words already found by the morphological analysis are tagged in the
    "์ง์ ์ ๋ ฅ" column; direct keywords missing from the analysis are
    appended as new rows with their raw substring frequency in *blog_text*
    plus per-keyword search-volume/blog-count data fetched on demand.

    Returns:
        tuple: (DataFrame sorted by ๋น๋์ desc, .xlsx export path).
    """
    debug_log("combined_analysis ํจ์ ์์")
    merged_df, _ = morphological_analysis_and_enrich(blog_text, remove_freq1)
    # Ensure the tag column exists even if no direct keyword ends up matching.
    if "์ง์ ์ ๋ ฅ" not in merged_df.columns:
        merged_df["์ง์ ์ ๋ ฅ"] = ""
    # Direct keywords may be separated by newlines or commas.
    direct_keywords = re.split(r'[\n,]+', direct_keyword_input)
    direct_keywords = [kw.strip() for kw in direct_keywords if kw.strip()]
    debug_log(f"์ ๋ ฅ๋ ์ง์ ํค์๋: {direct_keywords}")
    for dk in direct_keywords:
        if dk in merged_df["๋จ์ด"].values:
            # Already present from the morphological analysis: just tag it.
            merged_df.loc[merged_df["๋จ์ด"] == dk, "์ง์ ์ ๋ ฅ"] = "์ง์ ์ ๋ ฅ"
        else:
            # Not found by the analysis: count raw occurrences and fetch
            # search stats for this single keyword.
            freq = blog_text.count(dk)
            df_direct, _ = process_keyword(dk, include_related=False)
            if (not df_direct.empty) and (dk in df_direct["์ ๋ณดํค์๋"].values):
                row = df_direct[df_direct["์ ๋ณดํค์๋"] == dk].iloc[0]
                pc = row.get("PC์๊ฒ์๋", None)
                mobile = row.get("๋ชจ๋ฐ์ผ์๊ฒ์๋", None)
                total = row.get("ํ ํ์๊ฒ์๋", None)
                blog_count = row.get("๋ธ๋ก๊ทธ๋ฌธ์์", None)
            else:
                # API had no data for this keyword: leave stats empty.
                pc = mobile = total = blog_count = None
            new_row = {
                "๋จ์ด": dk,
                "๋น๋์": freq,
                "PC์๊ฒ์๋": pc,
                "๋ชจ๋ฐ์ผ์๊ฒ์๋": mobile,
                "ํ ํ์๊ฒ์๋": total,
                "๋ธ๋ก๊ทธ๋ฌธ์์": blog_count,
                "์ง์ ์ ๋ ฅ": "์ง์ ์ ๋ ฅ"
            }
            merged_df = pd.concat([merged_df, pd.DataFrame([new_row])], ignore_index=True)
    merged_df = merged_df.sort_values(by="๋น๋์", ascending=False).reset_index(drop=True)
    combined_excel = create_excel_file(merged_df)
    debug_log("combined_analysis ํจ์ ์๋ฃ")
    return merged_df, combined_excel
# --- Analysis dispatcher ---
def analysis_handler(blog_text: str, remove_freq1: bool, direct_keyword_input: str, direct_keyword_only: bool):
    """Dispatch to direct-keyword-only analysis or the combined analysis.

    Returns:
        tuple: (result DataFrame, .xlsx export path) in either mode.
    """
    debug_log("analysis_handler ํจ์ ์์")
    if not direct_keyword_only:
        # Default: morphological analysis merged with direct keywords.
        return combined_analysis(blog_text, remove_freq1, direct_keyword_input)
    # "Direct keyword input only" checked: skip morphological analysis.
    return direct_keyword_analysis(blog_text, direct_keyword_input)
# --- Run scraping ---
def fetch_blog_content(url: str):
    """Gradio callback: scrape *url* and return the post text (or an error message)."""
    debug_log("fetch_blog_content ํจ์ ์์")
    scraped = scrape_naver_blog(url)
    debug_log("fetch_blog_content ํจ์ ์๋ฃ")
    return scraped
# --- Custom CSS ---
# Raw CSS injected into the Gradio Blocks app via the `css=` parameter below.
# The selectors (.custom-group, .custom-button, ...) match the elem_classes
# assigned to components in the UI definition.
custom_css = """
/* ์ ์ฒด ์ปจํ ์ด๋ ์คํ์ผ */
.gradio-container {
max-width: 960px;
margin: auto;
font-family: 'Helvetica Neue', Arial, sans-serif;
background: #f5f7fa;
padding: 2rem;
}
/* ํค๋ ์คํ์ผ */
.custom-header {
text-align: center;
font-size: 2.5rem;
font-weight: bold;
margin-bottom: 1.5rem;
color: #333;
}
/* ๊ทธ๋ฃน ๋ฐ์ค ์คํ์ผ */
.custom-group {
background: #ffffff;
border-radius: 8px;
padding: 1.5rem;
box-shadow: 0 2px 8px rgba(0,0,0,0.1);
margin-bottom: 1.5rem;
}
/* ๋ฒํผ ์คํ์ผ */
.custom-button {
background-color: #007bff;
color: #fff;
border: none;
border-radius: 4px;
padding: 0.6rem 1.2rem;
font-size: 1rem;
cursor: pointer;
}
/* ์ฒดํฌ๋ฐ์ค ์คํ์ผ */
.custom-checkbox {
margin-right: 1rem;
}
/* ๊ฒฐ๊ณผ ํ ์ด๋ธ ๋ฐ ๋ค์ด๋ก๋ ๋ฒํผ */
.custom-result {
margin-top: 1.5rem;
}
/* ๊ฐ์ด๋ฐ ์ ๋ ฌ */
.centered {
display: flex;
justify-content: center;
align-items: center;
}
"""
# --- Gradio interface ---
# Layout: URL input + scrape button, editable scraped-text box, analysis
# options, run button, results table, and an Excel download slot.
with gr.Blocks(title="๋ค์ด๋ฒ ๋ธ๋ก๊ทธ ํํ์ ๋ถ์ ์๋น์ค", css=custom_css) as demo:
    gr.HTML("<div class='custom-header'>๋ค์ด๋ฒ ๋ธ๋ก๊ทธ ํํ์ ๋ถ์ ์๋น์ค</div>")
    # Blog link and the scrape button in one group (button centered).
    with gr.Group(elem_classes="custom-group"):
        with gr.Row():
            blog_url_input = gr.Textbox(label="๋ค์ด๋ฒ ๋ธ๋ก๊ทธ ๋งํฌ", placeholder="์: https://blog.naver.com/ssboost/222983068507", lines=1)
        with gr.Row(elem_classes="centered"):
            scrape_button = gr.Button("์คํฌ๋ํ ์คํ", elem_classes="custom-button")
    # Editable box holding the scraped blog text (user may tweak it).
    with gr.Group(elem_classes="custom-group"):
        blog_content_box = gr.Textbox(label="๋ธ๋ก๊ทธ ๋ด์ฉ (์์ ๊ฐ๋ฅ)", lines=10, placeholder="์คํฌ๋ํ๋ ๋ธ๋ก๊ทธ ๋ด์ฉ์ด ์ฌ๊ธฐ์ ํ์๋ฉ๋๋ค.")
    # Analysis options.
    with gr.Group(elem_classes="custom-group"):
        with gr.Row():
            remove_freq_checkbox = gr.Checkbox(label="๋น๋์1 ์ ๊ฑฐ", value=True, elem_classes="custom-checkbox")
        with gr.Row():
            # "Analyze direct keyword input only" checkbox sits below "remove frequency-1".
            direct_keyword_only_checkbox = gr.Checkbox(label="์ง์ ํค์๋ ์ ๋ ฅ๋ง ๋ถ์", value=False, elem_classes="custom-checkbox")
        with gr.Row():
            direct_keyword_box = gr.Textbox(label="์ง์ ํค์๋ ์ ๋ ฅ (์ํฐ ๋๋ ','๋ก ๊ตฌ๋ถ)", lines=2, placeholder="์: ํค์๋1, ํค์๋2\nํค์๋3")
    with gr.Group(elem_classes="custom-group"):
        with gr.Row(elem_classes="centered"):
            analyze_button = gr.Button("๋ถ์ ์คํ", elem_classes="custom-button")
    # Results table and Excel download.
    with gr.Group(elem_classes="custom-group custom-result"):
        result_df = gr.Dataframe(label="ํตํฉ ๋ถ์ ๊ฒฐ๊ณผ (๋จ์ด, ๋น๋์, ๊ฒ์๋, ๋ธ๋ก๊ทธ๋ฌธ์์, ์ง์ ์ ๋ ฅ)", interactive=True)
    with gr.Group(elem_classes="custom-group"):
        excel_file = gr.File(label="Excel ๋ค์ด๋ก๋")
    # Event wiring.
    scrape_button.click(fn=fetch_blog_content, inputs=blog_url_input, outputs=blog_content_box)
    analyze_button.click(fn=analysis_handler,
                         inputs=[blog_content_box, remove_freq_checkbox, direct_keyword_box, direct_keyword_only_checkbox],
                         outputs=[result_df, excel_file])
# Script entry point: launch the Gradio server (blocks until it exits).
if __name__ == "__main__":
    debug_log("Gradio ์ฑ ์คํ ์์")
    demo.launch()
    debug_log("Gradio ์ฑ ์คํ ์ข ๋ฃ")