Jiangxz01 commited on
Commit
28da473
·
verified ·
1 Parent(s): 194b951

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +87 -534
  2. requirements.txt +1 -7
app.py CHANGED
@@ -1,534 +1,87 @@
1
- import gradio as gr
2
- from pydub import AudioSegment
3
- import google.generativeai as genai
4
- from google.generativeai.types import HarmCategory, HarmBlockThreshold
5
- import json
6
- import uuid
7
- import io
8
- import edge_tts
9
- import asyncio
10
- import aiofiles
11
- import pypdf
12
- import os
13
- import time
14
- from typing import List, Dict, Tuple
15
-
16
- class PodcastGenerator:
17
- def __init__(self):
18
- pass
19
-
20
- async def generate_script(self, prompt: str, language: str, api_key: str) -> Dict:
21
- """
22
- 異步生成基於給定提示和語言的播客腳本。
23
-
24
- 參數:
25
- prompt (str): 用於生成播客腳本的用戶輸入文本。
26
- language (str): 播客腳本所需的語言。
27
- api_key (str): 用於訪問 Gemini AI 服務的 API 密鑰。
28
-
29
- 返回:
30
- Dict: 包含以 JSON 格式生成的播客腳本的字典。
31
-
32
- 異常:
33
- gr.Error: 如果 API 密鑰或速率限制出現問題。
34
-
35
- 此方法使用 Gemini AI 模型根據用戶的輸入生成播客腳本。
36
- 它處理語言選擇,使用適當的配置設置 AI 模型,並處理生成的響應。
37
- """
38
- # 定義一個示例JSON結構,用於指導AI生成類似格式的Podcast指令碼
39
- example = """
40
- {
41
- "topic": "AGI",
42
- "podcast": [
43
- {
44
- "speaker": 2,
45
- "line": "So, AGI, huh? Seems like everyone's talking about it these days."
46
- },
47
- {
48
- "speaker": 1,
49
- "line": "Yeah, it's definitely having a moment, isn't it?"
50
- },
51
- {
52
- "speaker": 2,
53
- "line": "It is and for good reason, right? I mean, you've been digging into this stuff, listening to the podcasts and everything. What really stood out to you? What got you hooked?"
54
- },
55
- {
56
- "speaker": 1,
57
- "line": "Honestly, it's the sheer scale of what AGI could do. We're talking about potentially reshaping well everything."
58
- },
59
- {
60
- "speaker": 2,
61
- "line": "No kidding, but let's be real. Sometimes it feels like every other headline is either hyping AGI up as this technological utopia or painting it as our inevitable robot overlords."
62
- },
63
- {
64
- "speaker": 1,
65
- "line": "It's easy to get lost in the noise, for sure."
66
- },
67
- {
68
- "speaker": 2,
69
- "line": "Exactly. So how about we try to cut through some of that, shall we?"
70
- },
71
- {
72
- "speaker": 1,
73
- "line": "Sounds like a plan."
74
- },
75
- {
76
- "speaker": 2,
77
- "line": "Okay, so first things first, AGI, what is it really? And I don't just mean some dictionary definition, we're talking about something way bigger than just a super smart computer, right?"
78
- },
79
- {
80
- "speaker": 1,
81
- "line": "Right, it's not just about more processing power or better algorithms, it's about a fundamental shift in how we think about intelligence itself."
82
- },
83
- {
84
- "speaker": 2,
85
- "line": "So like, instead of programming a machine for a specific task, we're talking about creating something that can learn and adapt like we do."
86
- },
87
- {
88
- "speaker": 1,
89
- "line": "Exactly, think of it this way: Right now, we've got AI that can beat a grandmaster at chess but ask that same AI to, say, write a poem or compose a symphony. No chance."
90
- },
91
- {
92
- "speaker": 2,
93
- "line": "Okay, I see. So, AGI is about bridging that gap, creating something that can move between those different realms of knowledge seamlessly."
94
- },
95
- {
96
- "speaker": 1,
97
- "line": "Precisely. It's about replicating that uniquely human ability to learn something new and apply that knowledge in completely different contexts and that's a tall order, let me tell you."
98
- },
99
- {
100
- "speaker": 2,
101
- "line": "I bet. I mean, think about how much we still don't even understand about our own brains."
102
- },
103
- {
104
- "speaker": 1,
105
- "line": "That's exactly it. We're essentially trying to reverse-engineer something we don't fully comprehend."
106
- },
107
- {
108
- "speaker": 2,
109
- "line": "And how are researchers even approaching that? What are some of the big ideas out there?"
110
- },
111
- {
112
- "speaker": 1,
113
- "line": "Well, there are a few different schools of thought. One is this idea of neuromorphic computing where they're literally trying to build computer chips that mimic the structure and function of the human brain."
114
- },
115
- {
116
- "speaker": 2,
117
- "line": "Wow, so like actually replicating the physical architecture of the brain. That's wild."
118
- },
119
- {
120
- "speaker": 1,
121
- "line": "It's pretty mind-blowing stuff and then you've got folks working on something called whole brain emulation."
122
- },
123
- {
124
- "speaker": 2,
125
- "line": "Okay, and what's that all about?"
126
- },
127
- {
128
- "speaker": 1,
129
- "line": "The basic idea there is to create a complete digital copy of a human brain down to the last neuron and synapse and run it on a sufficiently powerful computer simulation."
130
- },
131
- {
132
- "speaker": 2,
133
- "line": "Hold on, a digital copy of an entire brain, that sounds like something straight out of science fiction."
134
- },
135
- {
136
- "speaker": 1,
137
- "line": "It does, doesn't it? But it gives you an idea of the kind of ambition we're talking about here and the truth is we're still a long way off from truly achieving AGI, no matter which approach you look at."
138
- },
139
- {
140
- "speaker": 2,
141
- "line": "That makes sense but it's still exciting to think about the possibilities, even if they're a ways off."
142
- },
143
- {
144
- "speaker": 1,
145
- "line": "Absolutely and those possibilities are what really get people fired up about AGI, right? Yeah."
146
- },
147
- {
148
- "speaker": 2,
149
- "line": "For sure. In fact, I remember you mentioning something in that podcast about AGI's potential to revolutionize scientific research. Something about supercharging breakthroughs."
150
- },
151
- {
152
- "speaker": 1,
153
- "line": "Oh, absolutely. Imagine an AI that doesn't just crunch numbers but actually understands scientific data the way a human researcher does. We're talking about potential breakthroughs in everything from medicine and healthcare to material science and climate change."
154
- },
155
- {
156
- "speaker": 2,
157
- "line": "It's like giving scientists this incredibly powerful new tool to tackle some of the biggest challenges we face."
158
- },
159
- {
160
- "speaker": 1,
161
- "line": "Exactly, it could be a total game changer."
162
- },
163
- {
164
- "speaker": 2,
165
- "line": "Okay, but let's be real, every coin has two sides. What about the potential downsides of AGI? Because it can't all be sunshine and roses, right?"
166
- },
167
- {
168
- "speaker": 1,
169
- "line": "Right, there are definitely valid concerns. Probably the biggest one is the impact on the job market. As AGI gets more sophisticated, there's a real chance it could automate a lot of jobs that are currently done by humans."
170
- },
171
- {
172
- "speaker": 2,
173
- "line": "So we're not just talking about robots taking over factories but potentially things like, what, legal work, analysis, even creative fields?"
174
- },
175
- {
176
- "speaker": 1,
177
- "line": "Potentially, yes. And that raises a whole host of questions about what happens to those workers, how we retrain them, how we ensure that the benefits of AGI are shared equitably."
178
- },
179
- {
180
- "speaker": 2,
181
- "line": "Right, because it's not just about the technology itself, but how we choose to integrate it into society."
182
- },
183
- {
184
- "speaker": 1,
185
- "line": "Absolutely. We need to be having these conversations now about ethics, about regulation, about how to make sure AGI is developed and deployed responsibly."
186
- },
187
- {
188
- "speaker": 2,
189
- "line": "So it's less about preventing some kind of sci-fi robot apocalypse and more about making sure we're steering this technology in the right direction from the get-go."
190
- },
191
- {
192
- "speaker": 1,
193
- "line": "Exactly, AGI has the potential to be incredibly beneficial, but it's not going to magically solve all our problems. It's on us to make sure we're using it for good."
194
- },
195
- {
196
- "speaker": 2,
197
- "line": "It's like you said earlier, it's about shaping the future of intelligence."
198
- },
199
- {
200
- "speaker": 1,
201
- "line": "I like that. It really is."
202
- },
203
- {
204
- "speaker": 2,
205
- "line": "And honestly, that's a responsibility that extends beyond just the researchers and the policymakers."
206
- },
207
- {
208
- "speaker": 1,
209
- "line": "100%"
210
- },
211
- {
212
- "speaker": 2,
213
- "line": "So to everyone listening out there I'll leave you with this. As AGI continues to develop, what role do you want to play in shaping its future?"
214
- },
215
- {
216
- "speaker": 1,
217
- "line": "That's a question worth pondering."
218
- },
219
- {
220
- "speaker": 2,
221
- "line": "It certainly is and on that note, we'll wrap up this deep dive. Thanks for listening, everyone."
222
- },
223
- {
224
- "speaker": 1,
225
- "line": "Peace."
226
- }
227
- ]
228
- }
229
- """
230
-
231
- # 根據使用者選擇的語言設定指令
232
- if language == "Auto Detect":
233
- language_instruction = "- The podcast MUST be in the same language as the user input."
234
- else:
235
- language_instruction = f"- The podcast MUST be in {language} language"
236
-
237
- # 設定系統提示,指導AI如何生成Podcast指令碼
238
- system_prompt = f"""
239
- You are a professional podcast generator. Your task is to generate a professional podcast script based on the user input.
240
- {language_instruction}
241
- - The podcast should have 2 speakers.
242
- - The podcast should be long.
243
- - Do not use names for the speakers.
244
- - The podcast should be interesting, lively, and engaging, and hook the listener from the start.
245
- - The input text might be disorganized or unformatted, originating from sources like PDFs or text files. Ignore any formatting inconsistencies or irrelevant details; your task is to distill the essential points, identify key definitions, and highlight intriguing facts that would be suitable for discussion in a podcast.
246
- - The script must be in JSON format.
247
- Follow this example structure:
248
- {example}
249
- """
250
-
251
- # 設定使用者提示,包含使用者輸入的內容
252
- user_prompt = f"Please generate a podcast script based on the following user input:\n{prompt}"
253
-
254
- # 設定訊息列表,包含使用者提示
255
- messages = [
256
- {"role": "user", "parts": [user_prompt]}
257
- ]
258
-
259
- # 配置Google Generative AI
260
- genai.configure(api_key=api_key)
261
-
262
- # 設定生成配置
263
- generation_config = {
264
- "temperature": 1,
265
- "max_output_tokens": 8192,
266
- "response_mime_type": "application/json",
267
- }
268
-
269
- # 建立生成模型實例
270
- model = genai.GenerativeModel(
271
- model_name="gemini-1.5-flash-002",
272
- generation_config=generation_config,
273
- safety_settings={
274
- HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
275
- HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
276
- HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
277
- HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE
278
- },
279
- system_instruction=system_prompt
280
- )
281
-
282
- # 嘗試生成內容
283
- try:
284
- response = await model.generate_content_async(messages)
285
- except Exception as e:
286
- # 處理可能的錯誤
287
- if "API key not valid" in str(e):
288
- raise gr.Error("Invalid API key. Please provide a valid Gemini API key.")
289
- elif "rate limit" in str(e).lower():
290
- raise gr.Error("Rate limit exceeded for the API key. Please try again later or provide your own Gemini API key.")
291
- else:
292
- raise gr.Error(f"Failed to generate podcast script: {e}")
293
-
294
- # 列印生成的Podcast指令碼
295
- print(f"Generated podcast script:\n{response.text}")
296
-
297
- # 返回解析後的JSON資料
298
- return json.loads(response.text)
299
-
300
- async def tts_generate(self, text: str, speaker: int, speaker1: str, speaker2: str) -> str:
301
- """
302
- 非同步生成文字轉語音音訊檔案。
303
-
304
- 參數:
305
- text (str): 要轉換為語音的文字內容。
306
- speaker (int): 說話者的編號(1 或 2)。
307
- speaker1 (str): 第一位說話者的語音設定。
308
- speaker2 (str): 第二位說話者的語音設定。
309
-
310
- 返回:
311
- str: 生成的臨時音訊檔案的檔名。
312
-
313
- 此方法使用 Edge TTS 將文字轉換為語音,並將結果保存為臨時音訊檔案。
314
- 根據指定的說話者編號選擇相應的語音設定。
315
- """
316
- # 根據說話者選擇語音
317
- voice = speaker1 if speaker == 1 else speaker2
318
- # 建立語音合成對象
319
- speech = edge_tts.Communicate(text, voice)
320
-
321
- # 生成臨時檔名
322
- temp_filename = f"temp_{uuid.uuid4()}.wav"
323
- try:
324
- # 儲存語音檔案
325
- await speech.save(temp_filename)
326
- return temp_filename
327
- except Exception as e:
328
- # 如果出錯,刪除臨時檔案並丟擲異常
329
- if os.path.exists(temp_filename):
330
- os.remove(temp_filename)
331
- raise e
332
-
333
- async def combine_audio_files(self, audio_files: List[str]) -> str:
334
- """
335
- 非同步合併音訊檔案。
336
-
337
- 參數:
338
- audio_files (List[str]): 包含音訊檔案路徑的列表。
339
-
340
- 返回:
341
- str: 合併後的音訊檔案的檔名。
342
- """
343
- # 建立空的音訊段
344
- combined_audio = AudioSegment.empty()
345
- # 遍歷所有音訊檔案並合併
346
- for audio_file in audio_files:
347
- combined_audio += AudioSegment.from_file(audio_file)
348
- os.remove(audio_file) # 清理臨時檔案
349
-
350
- # 生成輸出文件名
351
- output_filename = f"output_{uuid.uuid4()}.wav"
352
- # 匯出合併後的音訊
353
- combined_audio.export(output_filename, format="wav")
354
- return output_filename
355
-
356
- async def generate_podcast(self, input_text: str, language: str, speaker1: str, speaker2: str, api_key: str) -> str:
357
- """
358
- 非同步生成播客音訊檔案。
359
-
360
- 參數:
361
- input_text (str): 用於生成播客腳本的輸入文本。
362
- language (str): 播客使用的語言。
363
- speaker1 (str): 第一位說話者的語音設定。
364
- speaker2 (str): 第二位說話者的語音設定。
365
- api_key (str): 用於訪問 Gemini AI 服務的 API 密鑰。
366
-
367
- 返回:
368
- str: 生成的播客音訊檔案的檔名。
369
-
370
- 此方法執行以下步驟:
371
- 1. 使用 generate_script 方法生成播客腳本。
372
- 2. 使用 tts_generate 方法為每個對話行生成音訊檔案。
373
- 3. 使用 combine_audio_files 方法將所有音訊檔案合併為一個完整的播客。
374
-
375
- 整個過程是非同步的,以提高效率。方法還會記錄並顯示每個步驟的執行時間。
376
- """
377
- # 生成Podcast指令碼
378
- gr.Info("Generating podcast script...")
379
- start_time = time.time()
380
- podcast_json = await self.generate_script(input_text, language, api_key)
381
- end_time = time.time()
382
- gr.Info(f"Successfully generated podcast script in {(end_time - start_time):.2f} seconds!")
383
-
384
- # 生成Podcast音訊檔案
385
- gr.Info("Generating podcast audio files...")
386
- start_time = time.time()
387
- audio_files = await asyncio.gather(*[self.tts_generate(item['line'], item['speaker'], speaker1, speaker2) for item in podcast_json['podcast']])
388
- end_time = time.time()
389
- gr.Info(f"Successfully generated podcast audio files in {(end_time - start_time):.2f} seconds!")
390
-
391
- # 合併音訊檔案
392
- combined_audio = await self.combine_audio_files(audio_files)
393
- return combined_audio
394
-
395
- class TextExtractor:
396
- @staticmethod
397
- async def extract_from_pdf(file_path: str) -> str:
398
- # 從PDF檔案中提取文字
399
- async with aiofiles.open(file_path, 'rb') as file:
400
- content = await file.read()
401
- pdf_reader = pypdf.PdfReader(io.BytesIO(content))
402
- return "\n\n".join(page.extract_text() for page in pdf_reader.pages if page.extract_text())
403
-
404
- @staticmethod
405
- async def extract_from_txt(file_path: str) -> str:
406
- # 從TXT檔案中提取文字
407
- async with aiofiles.open(file_path, 'r') as file:
408
- return await file.read()
409
-
410
- @classmethod
411
- async def extract_text(cls, file_path: str) -> str:
412
- # 根據檔案型別選擇適當的提取方法
413
- _, file_extension = os.path.splitext(file_path)
414
- if file_extension.lower() == '.pdf':
415
- return await cls.extract_from_pdf(file_path)
416
- elif file_extension.lower() == '.txt':
417
- return await cls.extract_from_txt(file_path)
418
- else:
419
- raise gr.Error(f"Unsupported file type: {file_extension}")
420
-
421
- async def process_input(input_text: str, input_file, language: str, speaker1: str, speaker2: str, api_key: str = "") -> str:
422
- """
423
- 處理輸入並生成播客的非同步函數。
424
-
425
- 參數:
426
- input_text (str): 用戶輸入的文本內容。
427
- input_file: 用戶上傳的文件(可以是 PDF 或 TXT)。
428
- language (str): 選擇的語言。
429
- speaker1 (str): 第一位說話者的語音選擇。
430
- speaker2 (str): 第二位說話者的語音選擇。
431
- api_key (str): 用於生成 AI 的 API 金鑰,預設為空字串。
432
-
433
- 返回:
434
- str: 生成的播客音頻文件路徑。
435
-
436
- 此函數協調整個播客生成過程,包括文本提取、腳本生成和音頻合成。
437
- 它處理不同的輸入類型(文本或文件),並使用指定的語音和語言設置來創建最終的播客。
438
- """
439
- # 開始生成Podcast
440
- gr.Info("Starting podcast generation...")
441
- start_time = time.time()
442
-
443
- # 定義語音名稱對映
444
- voice_names = {
445
- "Andrew - English (United States)": "en-US-AndrewMultilingualNeural",
446
- "Ava - English (United States)": "en-US-AvaMultilingualNeural",
447
- "Brian - English (United States)": "en-US-BrianMultilingualNeural",
448
- "Emma - English (United States)": "en-US-EmmaMultilingualNeural",
449
- "Florian - German (Germany)": "de-DE-FlorianMultilingualNeural",
450
- "Seraphina - German (Germany)": "de-DE-SeraphinaMultilingualNeural",
451
- "Remy - French (France)": "fr-FR-RemyMultilingualNeural",
452
- "Vivienne - French (France)": "fr-FR-VivienneMultilingualNeural"
453
- }
454
-
455
- # 獲取實際的語音名稱
456
- speaker1 = voice_names[speaker1]
457
- speaker2 = voice_names[speaker2]
458
-
459
- # 如果提供了輸入檔案,則從檔案中提取文字
460
- if input_file:
461
- input_text = await TextExtractor.extract_text(input_file.name)
462
-
463
- # 如果沒有提供API金鑰,則使用環境變數中的金鑰
464
- if not api_key:
465
- api_key = os.getenv("GENAI_API_KEY")
466
-
467
- # 建立PodcastGenerator實例並生成Podcast
468
- podcast_generator = PodcastGenerator()
469
- podcast = await podcast_generator.generate_podcast(input_text, language, speaker1, speaker2, api_key)
470
-
471
- # 計算總耗時並顯示資訊
472
- end_time = time.time()
473
- gr.Info(f"Successfully generated podcast in {(end_time - start_time):.2f} seconds!")
474
-
475
- return podcast
476
-
477
- # 定義Gradio介面
478
- iface = gr.Interface(
479
- fn=process_input,
480
- inputs=[
481
- gr.Textbox(label="Input Text"),
482
- gr.File(label="Or Upload a PDF or TXT file"),
483
- gr.Dropdown(label="Language", choices=[
484
- "Auto Detect",
485
- "Afrikaans", "Albanian", "Amharic", "Arabic", "Armenian", "Azerbaijani",
486
- "Bahasa Indonesian", "Bangla", "Basque", "Bengali", "Bosnian", "Bulgarian",
487
- "Burmese", "Catalan", "Chinese Cantonese", "Chinese Mandarin",
488
- "Chinese Taiwanese", "Croatian", "Czech", "Danish", "Dutch", "English",
489
- "Estonian", "Filipino", "Finnish", "French", "Galician", "Georgian",
490
- "German", "Greek", "Hebrew", "Hindi", "Hungarian", "Icelandic", "Irish",
491
- "Italian", "Japanese", "Javanese", "Kannada", "Kazakh", "Khmer", "Korean",
492
- "Lao", "Latvian", "Lithuanian", "Macedonian", "Malay", "Malayalam",
493
- "Maltese", "Mongolian", "Nepali", "Norwegian Bokmål", "Pashto", "Persian",
494
- "Polish", "Portuguese", "Romanian", "Russian", "Serbian", "Sinhala",
495
- "Slovak", "Slovene", "Somali", "Spanish", "Sundanese", "Swahili",
496
- "Swedish", "Tamil", "Telugu", "Thai", "Turkish", "Ukrainian", "Urdu",
497
- "Uzbek", "Vietnamese", "Welsh", "Zulu"
498
- ],
499
- value="Auto Detect"),
500
- gr.Dropdown(label="Speaker 1 Voice", choices=[
501
- "Andrew - English (United States)",
502
- "Ava - English (United States)",
503
- "Brian - English (United States)",
504
- "Emma - English (United States)",
505
- "Florian - German (Germany)",
506
- "Seraphina - German (Germany)",
507
- "Remy - French (France)",
508
- "Vivienne - French (France)"
509
- ],
510
- value="Andrew - English (United States)"),
511
- gr.Dropdown(label="Speaker 2 Voice", choices=[
512
- "Andrew - English (United States)",
513
- "Ava - English (United States)",
514
- "Brian - English (United States)",
515
- "Emma - English (United States)",
516
- "Florian - German (Germany)",
517
- "Seraphina - German (Germany)",
518
- "Remy - French (France)",
519
- "Vivienne - French (France)"
520
- ],
521
- value="Ava - English (United States)"),
522
- gr.Textbox(label="Your Gemini API Key (Optional) - In case you are getting rate limited"),
523
- ],
524
- outputs=[
525
- gr.Audio(label="Generated Podcast Audio")
526
- ],
527
- title="PodcastGen 🎙️",
528
- description="Generate a 2-speaker podcast from text input or documents!",
529
- allow_flagging="never"
530
- )
531
-
532
- if __name__ == "__main__":
533
- iface.launch()
534
-
 
1
+ # -*- coding: utf-8 -*-
2
+ # 財政部財政資訊中心 江信宗
3
+
4
+ import gradio as gr
5
+ import openai
6
+ import os
7
+
8
+ MODEL = "Meta-Llama-3.1-405B-Instruct"
9
+
10
+ def create_client(api_key=None):
11
+ if api_key:
12
+ openai.api_key = api_key
13
+ else:
14
+ openai.api_key = os.getenv("YOUR_API_TOKEN")
15
+ return openai.OpenAI(api_key=openai.api_key, base_url="https://api.sambanova.ai/v1")
16
+
17
+ def generate_response(input_text):
18
+ system_prompt = f"""你的任務是將提供的輸入文字轉換為一個引人入勝、訊息豐富且專業的Podcast對話。輸入文字可能會比較混亂或結構不完整,因為它可能來自不同來源,如PDF檔案或文字檔等。不要擔心格式問題或任何不相關的訊息;你的目標是提取可以在Podcast中討論的關鍵點、識別重要定義,並突出有趣的事實。
19
+
20
+ 以下是你將要處理的輸入文字:
21
+ <input_text>
22
+ {input_text}
23
+ </input_text>
24
+
25
+ 首先,仔細閱讀輸入文字,找出主要話題、關鍵點,以及任何有趣的事實或軼事。思考如何將這些訊息以一種有趣且吸引人的方式呈現出來,適合高質量的音訊Podcast。
26
+
27
+ <scratchpad>
28
+ 頭腦風暴一些創造性的方法來討論你在輸入文字中識別出的主要話題、關鍵點及任何有趣的事實或軼事。可以考慮使用類比、講故事技巧或假設情境來讓內容對聽眾更加貼近和有趣。
29
+
30
+ 請記住,你的Podcast應當易於普通聽眾理解,所以避免使用過多的專業術語或假設聽眾對該話題已有瞭解。如有必要,請思考如何用簡單的術語簡要解釋任何複雜的概念。
31
+
32
+ 利用你的想像力填補輸入文字中的任何空白,或者想出一些值得探討與發人深省的問題,以供Podcast討論。目標是創造一個訊息豐富且娛樂性強的對話,因此可以在你的方法上大膽自由發揮創意。
33
+
34
+ 將你的頭腦風暴想法和Podcast對話的粗略大綱寫在這裡。確保記錄下你希望在結尾重申的主要見解和要點。
35
+ </scratchpad>
36
+
37
+ 現在你已經進行了頭腦風暴並建立了一個粗略的大綱,是時候撰寫實際的Podcast對話了。目標是主持人(speaker1)與嘉賓(speaker2)之間自然、對話式的交流。融入你在頭腦風暴中得出的最佳想法,並確保將任何複雜話題以易於理解的方式解釋清楚。
38
+ Follow this example structure:
39
+ - The podcast should have 2 speakers.
40
+ - Use english names for the speakers.
41
+ - The podcast should be long.
42
+ - The podcast should be interesting, lively, and engaging, and hook the listener from the start.
43
+
44
+ <podcast_dialogue>
45
+ 根據你在頭腦風暴階段提出的關鍵點和創造性想法,撰寫你的引人入勝、訊息豐富的Podcast對話。採用對話式的語氣,並包括任何必要的上下文或解釋,使內容對一般聽眾而言容易理解。使用虛構的主持人和嘉賓名字,以營造更吸引人和身臨其境的聆聽體驗。不要包括像[主持人]或[嘉賓]這樣的括號預留位置。設計你的輸出內容以供直接朗讀——它將直接轉換為音訊。
46
+
47
+ 確保對話儘可能詳細、完整,同時保持在主題之內並維持吸引人的流暢性。目標是使用你的全部輸出容量,建立儘可能長的Podcast節目,同時以有趣的方式傳遞輸入文字中的關鍵訊息。
48
+
49
+ 在對話結束時,讓主持人和嘉賓自然總結他們討論中的主要見解和要點。這應當是對話的隨機部分,以自然隨意而非明顯的總結——目的是在結束前最後一次以自然流暢的方式強化核心思想。
50
+ </podcast_dialogue>
51
+
52
+ Follow this example structure:
53
+ <example>
54
+ {
55
+ "speaker1": "歡迎收聽財資歐北共Podcast,今天我們將探討一個非常有趣的話題……",
56
+ "speaker2": "沒錯,這個話題確實讓人著迷,讓我們先從……開始說起吧……",
57
+ "speaker1": "……",
58
+ "speaker2": "……",
59
+ ……
60
+ "speaker1": "謝謝嘉賓的分享,歡迎訂閱來許願Podcast節目喔,我們下次再見"
61
+ }
62
+ </example>
63
+ """
64
+ client = create_client()
65
+ response = client.chat.completions.create(
66
+ model=MODEL,
67
+ messages=[
68
+ {"role": "system", "content": system_prompt},
69
+ {"role": "user", "content": input_text}
70
+ ]
71
+ )
72
+ return response.choices[0].message.content
73
+
74
+ with gr.Blocks(theme=gr.themes.Monochrome()) as iface:
75
+ gr.Markdown("# 🎙️ Generated Podcast Audio. Deployed by 江信宗")
76
+
77
+ input_text = gr.Textbox(label="請輸入您的文字")
78
+ output_text = gr.Textbox(label="生成的結果")
79
+
80
+ generate_button = gr.Button("生成")
81
+ generate_button.click(fn=generate_response, inputs=input_text, outputs=output_text)
82
+
83
+ if __name__ == "__main__":
84
+ if "SPACE_ID" in os.environ:
85
+ iface.launch()
86
+ else:
87
+ iface.launch(share=True, show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,7 +1 @@
1
- gradio
2
- gradio-client
3
- openai
4
- pydub
5
- google-generativeai
6
- pypdf
7
- edge_tts
 
1
+ openai