AIRider committed on
Commit
52d4f4d
Β·
verified Β·
1 Parent(s): e621f27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -132
app.py CHANGED
@@ -7,11 +7,21 @@ import openai
7
  import os
8
  import random
9
  import re
10
-
11
- logging.basicConfig(filename='youtube_script_extractor.log', level=logging.DEBUG,
12
- format='%(asctime)s - %(levelname)s - %(message)s')
13
-
14
- openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
 
 
 
 
 
 
 
 
15
 
16
  def parse_api_response(response):
17
  try:
@@ -25,50 +35,29 @@ def parse_api_response(response):
25
  except Exception as e:
26
  raise ValueError(f"API 응닡 νŒŒμ‹± μ‹€νŒ¨: {str(e)}")
27
 
28
- def split_sentences(text):
29
- sentences = re.split(r"(λ‹ˆλ‹€|μ—μš”|κ΅¬λ‚˜|ν•΄μš”|κ΅°μš”|κ² μ–΄μš”|μ‹œμ˜€|해라|μ˜ˆμš”|μ•„μš”|λ°μš”|λŒ€μš”|μ„Έμš”|μ–΄μš”|κ²Œμš”|κ΅¬μš”|κ³ μš”|λ‚˜μš”|ν•˜μ£ )(?![\w])", text)
30
- combined_sentences = []
31
- current_sentence = ""
32
- for i in range(0, len(sentences), 2):
33
- if i + 1 < len(sentences):
34
- sentence = sentences[i] + sentences[i + 1]
35
- else:
36
- sentence = sentences[i]
37
- if len(current_sentence) + len(sentence) > 100:
38
- combined_sentences.append(current_sentence.strip())
39
- current_sentence = sentence.strip()
40
- else:
41
- current_sentence += sentence
42
- if sentence.endswith(('.', '?', '!')):
43
- combined_sentences.append(current_sentence.strip())
44
- current_sentence = ""
45
- if current_sentence:
46
- combined_sentences.append(current_sentence.strip())
47
- return combined_sentences
48
-
49
  def get_youtube_script(url):
50
  logging.info(f"슀크립트 μΆ”μΆœ μ‹œμž‘: URL = {url}")
51
-
52
  client = Client("whispersound/YT_Ts_R")
53
-
54
  try:
55
  logging.debug("API 호좜 μ‹œμž‘")
56
  result = client.predict(youtube_url=url, api_name="/predict")
57
  logging.debug("API 호좜 μ™„λ£Œ")
58
-
59
  parsed_result = parse_api_response(result)
60
-
61
  title = parsed_result["data"][0]["title"]
62
- transcription_text = parsed_result["data"][0]["transcriptionAsText"]
63
- sections = parsed_result["data"][0]["sections"]
64
-
65
  logging.info("슀크립트 μΆ”μΆœ μ™„λ£Œ")
66
- return title, transcription_text, sections
67
-
 
 
 
68
  except Exception as e:
69
  error_msg = f"슀크립트 μΆ”μΆœ 쀑 였λ₯˜ λ°œμƒ: {str(e)}"
70
  logging.exception(error_msg)
71
- return "", "", []
 
 
 
72
 
73
  def call_api(prompt, max_tokens, temperature, top_p):
74
  try:
@@ -84,122 +73,117 @@ def call_api(prompt, max_tokens, temperature, top_p):
84
  logging.exception("LLM API 호좜 쀑 였λ₯˜ λ°œμƒ")
85
  return "μš”μ•½μ„ μƒμ„±ν•˜λŠ” λ™μ•ˆ 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. λ‚˜μ€‘μ— λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."
86
 
 
 
 
 
 
 
 
 
 
 
87
  def summarize_section(section_text):
88
- prompt = f"""
89
- λ‹€μŒ 유튜브 λŒ€λ³Έ μ„Ήμ…˜μ˜ 핡심 λ‚΄μš©μ„ κ°„κ²°ν•˜κ²Œ μš”μ•½ν•˜μ„Έμš”:
90
- 1. ν•œκΈ€λ‘œ μž‘μ„±ν•˜μ„Έμš”.
91
- 2. μ£Όμš” 논점과 μ€‘μš”ν•œ 세뢀사항을 ν¬ν•¨ν•˜μ„Έμš”.
92
- 3. μš”μ•½μ€ 2-3λ¬Έμž₯으둜 μ œν•œν•˜μ„Έμš”.
93
 
94
- μ„Ήμ…˜ λ‚΄μš©:
95
  {section_text}
96
- """
97
- return call_api(prompt, max_tokens=150, temperature=0.3, top_p=0.9)
98
 
99
- def format_time(seconds):
100
- minutes, seconds = divmod(seconds, 60)
101
- hours, minutes = divmod(minutes, 60)
102
- return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}"
103
-
104
- def generate_timeline_summary(sections):
105
- timeline_summary = ""
106
- for i, section in enumerate(sections, 1):
107
- start_time = format_time(section['start_time'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  summary = summarize_section(section['text'])
109
- timeline_summary += f"{start_time} {i}. {summary}\n\n"
110
- return timeline_summary
111
-
112
- def summarize_text(text):
113
- prompt = f"""
114
- 1. λ‹€μŒ μ£Όμ–΄μ§€λŠ” 유튜브 λŒ€λ³Έμ˜ 핡심 μ£Όμ œμ™€ λͺ¨λ“  μ£Όμš” λ‚΄μš©μ„ μƒμ„Έν•˜κ²Œ μš”μ•½ν•˜λΌ
115
- 2. λ°˜λ“œμ‹œ ν•œκΈ€λ‘œ μž‘μ„±ν•˜λΌ
116
- 3. μš”μ•½λ¬Έλ§ŒμœΌλ‘œλ„ μ˜μƒμ„ 직접 μ‹œμ²­ν•œ 것과 λ™μΌν•œ μˆ˜μ€€μœΌλ‘œ λ‚΄μš©μ„ 이해할 수 μžˆλ„λ‘ μƒμ„Ένžˆ μž‘μ„±
117
- 4. 글을 λ„ˆλ¬΄ μ••μΆ•ν•˜κ±°λ‚˜ ν•¨μΆ•ν•˜μ§€ 말고, μ€‘μš”ν•œ λ‚΄μš©κ³Ό 세뢀사항을 λͺ¨λ‘ 포함
118
- 5. λ°˜λ“œμ‹œ λŒ€λ³Έμ˜ 흐름과 논리 ꡬ쑰λ₯Ό μœ μ§€
119
- 6. λ°˜λ“œμ‹œ μ‹œκ°„ μˆœμ„œλ‚˜ μ‚¬κ±΄μ˜ μ „κ°œ 과정을 λͺ…ν™•ν•˜κ²Œ 반영
120
- 7. λ“±μž₯인물, μž₯μ†Œ, 사건 λ“± μ€‘μš”ν•œ μš”μ†Œλ₯Ό μ •ν™•ν•˜κ²Œ μž‘μ„±
121
- 8. λŒ€λ³Έμ—μ„œ μ „λ‹¬ν•˜λŠ” κ°μ •μ΄λ‚˜ λΆ„μœ„κΈ°λ„ 포함
122
- 9. λ°˜λ“œμ‹œ 기술적 μš©μ–΄λ‚˜ μ „λ¬Έ μš©μ–΄κ°€ μžˆμ„ 경우, 이λ₯Ό μ •ν™•ν•˜κ²Œ μ‚¬μš©
123
- 10. λŒ€λ³Έμ˜ λͺ©μ μ΄λ‚˜ μ˜λ„λ₯Ό νŒŒμ•…ν•˜κ³ , 이λ₯Ό μš”μ•½μ— λ°˜λ“œμ‹œ 반영
124
- 11. 전체글을 보고
125
-
126
- ---
127
-
128
- 이 ν”„λ‘¬ν”„νŠΈκ°€ 도움이 λ˜μ‹œκΈΈ λ°”λžλ‹ˆλ‹€.
129
- \n\n
130
- {text}"""
131
-
132
- try:
133
- return call_api(prompt, max_tokens=10000, temperature=0.3, top_p=0.9)
134
- except Exception as e:
135
- logging.exception("μš”μ•½ 생성 쀑 였λ₯˜ λ°œμƒ")
136
- return "μš”μ•½μ„ μƒμ„±ν•˜λŠ” λ™μ•ˆ 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. λ‚˜μ€‘μ— λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."
137
 
138
  with gr.Blocks() as demo:
139
  gr.Markdown("## YouTube 슀크립트 μΆ”μΆœ 및 μš”μ•½ 도ꡬ")
140
-
141
  youtube_url_input = gr.Textbox(label="YouTube URL μž…λ ₯")
142
  analyze_button = gr.Button("λΆ„μ„ν•˜κΈ°")
143
  script_output = gr.HTML(label="슀크립트")
144
- timeline_output = gr.HTML(label="νƒ€μž„λΌμΈ μš”μ•½")
145
- summary_output = gr.HTML(label="전체 μš”μ•½")
146
-
147
- cached_data = gr.State({"url": "", "title": "", "script": "", "sections": []})
148
 
149
  def extract_and_cache(url, cache):
150
  if url == cache["url"]:
151
- return cache["title"], cache["script"], cache["sections"], cache
152
-
153
- title, script, sections = get_youtube_script(url)
154
- new_cache = {"url": url, "title": title, "script": script, "sections": sections}
155
- return title, script, sections, new_cache
156
 
157
  def display_script(title, script):
158
- if not script:
159
- return "<p>슀크립트λ₯Ό μΆ”μΆœν•˜μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€. URL을 ν™•μΈν•˜κ³  λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”.</p>"
160
- formatted_script = "\n".join(split_sentences(script))
161
- script_html = f"""<h2 style='font-size:24px;'>{title}</h2>
162
- <details>
163
- <summary><h3>원문 슀크립트 (ν΄λ¦­ν•˜μ—¬ 펼치기)</h3></summary>
164
- <div style="white-space: pre-wrap;">{formatted_script}</div>
165
- </details>"""
166
  return script_html
167
 
168
- def display_timeline(sections):
169
- if not sections:
170
- return "<p>νƒ€μž„λΌμΈμ„ μƒμ„±ν•˜μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€. 슀크립트 μΆ”μΆœμ— μ‹€νŒ¨ν–ˆμ„ 수 μžˆμŠ΅λ‹ˆλ‹€.</p>"
171
- timeline_summary = generate_timeline_summary(sections)
172
- timeline_html = f"""
173
- <h3>νƒ€μž„λΌμΈ μš”μ•½:</h3>
174
- <div style="white-space: pre-wrap; max-height: 400px; overflow-y: auto; border: 1px solid #ccc; padding: 10px;">
175
- {timeline_summary}
176
- </div>
177
- """
178
- return timeline_html
179
-
180
- def generate_summary(script):
181
- if not script:
182
- return "<p>전체 μš”μ•½μ„ μƒμ„±ν•˜μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€. 슀크립트 μΆ”μΆœμ— μ‹€νŒ¨ν–ˆμ„ 수 μžˆμŠ΅λ‹ˆλ‹€.</p>"
183
- summary = summarize_text(script)
184
- summary_html = f"""
185
- <h3>전체 μš”μ•½:</h3>
186
- <div style="white-space: pre-wrap; max-height: 400px; overflow-y: auto; border: 1px solid #ccc; padding: 10px;">
187
- {summary}
188
- </div>
189
- """
190
- return summary_html
191
-
192
- def analyze(url, cache):
193
- title, script, sections, new_cache = extract_and_cache(url, cache)
194
- script_html = display_script(title, script)
195
- timeline_html = display_timeline(sections)
196
- summary_html = generate_summary(script)
197
- return script_html, timeline_html, summary_html, new_cache
198
 
199
  analyze_button.click(
200
- analyze,
201
- inputs=[youtube_url_input, cached_data],
202
- outputs=[script_output, timeline_output, summary_output, cached_data]
 
 
 
 
203
  )
204
 
205
  demo.launch(share=True)
 
7
  import os
8
  import random
9
  import re
10
+ import nltk
11
+ import numpy as np
12
+ from sklearn.feature_extraction.text import TfidfVectorizer
13
+ from sklearn.metrics.pairwise import cosine_similarity
14
+ import urllib.parse
15
+
16
# Download the NLTK sentence tokenizer model once at startup (no-op if it is
# already present). quiet=True keeps the download progress bar out of stdout.
nltk.download('punkt', quiet=True)

# Logging configuration: write to a file at DEBUG level so the API
# request/response steps logged below are traceable after the fact.
logging.basicConfig(
    filename='youtube_script_extractor.log',
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
 
26
  def parse_api_response(response):
27
  try:
 
35
  except Exception as e:
36
  raise ValueError(f"API 응닡 νŒŒμ‹± μ‹€νŒ¨: {str(e)}")
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
def get_youtube_script(url):
    """Fetch the title and transcription of a YouTube video.

    Calls the whispersound/YT_Ts_R Gradio space, parses its response, and
    returns ``(title, script_json)`` where ``script_json`` is a JSON string
    with keys ``"title"`` and ``"transcription"``. Returns ``("", "")`` on
    any failure (the UI layer treats empty strings as "extraction failed").
    """
    logging.info(f"슀크립트 μΆ”μΆœ μ‹œμž‘: URL = {url}")
    try:
        # BUG FIX: Client() construction can itself raise (network/space
        # unavailable); keep it inside the try so callers still get ("", "")
        # instead of an unhandled exception.
        client = Client("whispersound/YT_Ts_R")
        logging.debug("API 호좜 μ‹œμž‘")
        result = client.predict(youtube_url=url, api_name="/predict")
        logging.debug("API 호좜 μ™„λ£Œ")
        parsed_result = parse_api_response(result)
        title = parsed_result["data"][0]["title"]
        transcription = parsed_result["data"][0]["transcription"]
        logging.info("슀크립트 μΆ”μΆœ μ™„λ£Œ")
        # ensure_ascii=False keeps the Korean text human-readable in the
        # cached JSON; json.loads round-trips it identically either way.
        script_json = json.dumps(
            {"title": title, "transcription": transcription},
            ensure_ascii=False,
        )
        return title, script_json
    except Exception as e:
        error_msg = f"슀크립트 μΆ”μΆœ 쀑 였λ₯˜ λ°œμƒ: {str(e)}"
        logging.exception(error_msg)
        return "", ""
58
+
59
# OpenAI API key configuration — read from the environment, never hardcoded.
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    # Warn loudly at startup instead of failing on the first summary call.
    logging.warning("OPENAI_API_KEY is not set; summary generation will fail.")
61
 
62
  def call_api(prompt, max_tokens, temperature, top_p):
63
  try:
 
73
  logging.exception("LLM API 호좜 쀑 였λ₯˜ λ°œμƒ")
74
  return "μš”μ•½μ„ μƒμ„±ν•˜λŠ” λ™μ•ˆ 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. λ‚˜μ€‘μ— λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."
75
 
76
def extract_video_id(url):
    """Return the YouTube video id embedded in *url*, or None.

    Supports standard watch URLs (``youtube.com/watch?v=...``), the mobile
    host (``m.youtube.com`` — previously unhandled), and ``youtu.be`` short
    links where the id is the path. Unrecognised hosts yield None.
    """
    parsed_url = urllib.parse.urlparse(url)
    host = parsed_url.hostname  # None for non-URL strings — both branches handle it
    if host in ('www.youtube.com', 'youtube.com', 'm.youtube.com'):
        query_params = urllib.parse.parse_qs(parsed_url.query)
        # parse_qs maps each key to a list; missing 'v' falls back to None.
        return query_params.get('v', [None])[0]
    elif host == 'youtu.be':
        # Short links carry the id as the path after the leading slash.
        return parsed_url.path[1:]
    else:
        return None
85
+
86
def summarize_section(section_text):
    """Ask the LLM for a concise Korean summary of one transcript section."""
    request_prompt = f"""λ‹€μŒ λ‚΄μš©μ˜ 핡심을 μš”μ•½ν•΄ μ£Όμ„Έμš”:

{section_text}

μš”μ•½μ€ ν•œκ΅­μ–΄λ‘œ κ°„κ²°ν•˜κ²Œ μž‘μ„±ν•΄ μ£Όμ„Έμš”.
"""
    # Low temperature keeps the summary deterministic and focused.
    response = call_api(request_prompt, max_tokens=500, temperature=0.3, top_p=0.9)
    return response
94
+
95
def segment_transcript(transcript):
    """Split a timed transcript into topically coherent sections.

    Each *transcript* entry is expected to carry a ``'subtitle'`` text and a
    ``'start'`` time in seconds (schema inferred from the keys read below —
    TODO confirm against the extraction API). Sentences are tokenised with
    NLTK, TF-IDF vectorised, and a section boundary is placed wherever the
    cosine similarity between adjacent sentences drops below a threshold.

    Returns a list of ``{'text': ..., 'start_time': ...}`` dicts; an empty
    transcript yields an empty list.
    """
    sentences = []
    start_times = []
    for entry in transcript:
        subtitle = entry['subtitle']
        start_time = entry['start']
        split_sentences = nltk.tokenize.sent_tokenize(subtitle)
        sentences.extend(split_sentences)
        # Every sentence from one subtitle inherits that subtitle's start time.
        start_times.extend([start_time] * len(split_sentences))

    # BUG FIX: TfidfVectorizer raises ValueError on an empty corpus and the
    # boundary indexing below assumes at least one sentence — bail out early.
    if not sentences:
        return []

    vectors = TfidfVectorizer().fit_transform(sentences).toarray()

    # Collect boundary indices; adjacent sentences with low similarity start
    # a new section. 0.3 is an empirically chosen threshold.
    boundaries = [0]
    threshold = 0.3
    for i in range(1, len(sentences)):
        similarity = cosine_similarity([vectors[i - 1]], [vectors[i]])[0][0]
        if similarity < threshold:
            boundaries.append(i)
    boundaries.append(len(sentences))

    sections = []
    for i in range(len(boundaries) - 1):
        start_idx = boundaries[i]
        end_idx = boundaries[i + 1]
        sections.append({
            'text': ' '.join(sentences[start_idx:end_idx]),
            'start_time': start_times[start_idx],
        })
    return sections
128
+
129
def generate_summary(sections, url):
    """Render an HTML summary of all *sections* with clickable timestamps.

    Each section gets an ``HH:MM:SS`` link that deep-links into the video at
    that offset, followed by the LLM-generated summary of the section text.
    """
    video_id = extract_video_id(url)
    # Collect fragments and join once instead of repeated string +=.
    parts = ["<h3>μš”μ•½:</h3>"]
    for section in sections:  # was enumerate(); the index was never used
        start_time = section['start_time']
        minutes, seconds = divmod(int(start_time), 60)
        hours, minutes = divmod(minutes, 60)
        timestamp_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
        # &t=<seconds>s makes YouTube seek to the section start.
        timestamp_link = f"https://www.youtube.com/watch?v={video_id}&t={int(start_time)}s"
        summary = summarize_section(section['text'])
        parts.append(f"""
<h4><a href="{timestamp_link}" target="_blank">{timestamp_str}</a></h4>
<div style="white-space: pre-wrap; margin-bottom: 20px;">{summary}</div>
""")
    return "".join(parts)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
with gr.Blocks() as demo:
    gr.Markdown("## YouTube 슀크립트 μΆ”μΆœ 및 μš”μ•½ 도ꡬ")
    youtube_url_input = gr.Textbox(label="YouTube URL μž…λ ₯")
    analyze_button = gr.Button("λΆ„μ„ν•˜κΈ°")
    script_output = gr.HTML(label="슀크립트")
    summary_output = gr.HTML(label="μš”μ•½")
    # Per-session cache so re-analysing the same URL skips the extraction API.
    cached_data = gr.State({"url": "", "title": "", "script": ""})

    def extract_and_cache(url, cache):
        """Return (title, script_json, cache), reusing the cache on a repeat URL."""
        if url == cache["url"]:
            return cache["title"], cache["script"], cache
        title, script = get_youtube_script(url)
        new_cache = {"url": url, "title": title, "script": script}
        return title, script, new_cache

    def display_script(title, script):
        """Render the script panel; only the title is shown, the raw JSON stays cached."""
        script_html = f"""<h2 style='font-size:24px;'>{title}</h2>"""
        return script_html

    def update_summary(cache):
        """Segment the cached transcript and build the timestamped summary HTML."""
        if not cache["script"]:
            return "μŠ€ν¬λ¦½νŠΈκ°€ μ—†μŠ΅λ‹ˆλ‹€. λ¨Όμ € YouTube URL을 μž…λ ₯ν•˜κ³  뢄석을 μ‹€ν–‰ν•΄μ£Όμ„Έμš”."
        try:
            parsed_result = json.loads(cache["script"])
            transcript = parsed_result.get("transcription", [])
            if not transcript:
                return "트랜슀크립트λ₯Ό κ°€μ Έμ˜¬ 수 μ—†μŠ΅λ‹ˆλ‹€."
            sections = segment_transcript(transcript)
            return generate_summary(sections, cache["url"])
        except Exception:
            logging.exception("μš”μ•½ 생성 쀑 였λ₯˜ λ°œμƒ")
            return "μš”μ•½μ„ μƒμ„±ν•˜λŠ” λ™μ•ˆ 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. λ‚˜μ€‘μ— λ‹€μ‹œ μ‹œλ„ν•΄ μ£Όμ„Έμš”."

    def analyze(url, cache):
        # BUG FIX: the click handler previously wired extract_and_cache
        # (3 return values) directly to 2 output components, which makes
        # Gradio raise at runtime and left display_script unused. Render
        # the title HTML here and return exactly two values.
        title, script, new_cache = extract_and_cache(url, cache)
        return display_script(title, script), new_cache

    analyze_button.click(
        analyze,
        inputs=[youtube_url_input, cached_data],
        outputs=[script_output, cached_data]
    ).then(
        update_summary,
        inputs=[cached_data],
        outputs=summary_output
    )

demo.launch(share=True)