Update app.py

app.py CHANGED
@@ -627,7 +627,6 @@ def generate_section_video(prompt, preset, section_number=1, base_seed=171198, p
         raise gr.Error(f"Error while generating section {section_number}: {str(e)}")


-# Add per-section prompt generation function
 def generate_single_section_prompt(scenario, section_number):
     """Generate a prompt for an individual section"""
     section_descriptions = {
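Hunk 1 ends on the opening line of `section_descriptions`; the dictionary itself never appears in this diff. Judging from how it is indexed later (`section_descriptions[section_number]` for sections 1-5), it presumably maps each section number to a short description of that section's role. A purely hypothetical illustration of its shape (placeholder wording, not the repository's actual values):

# Hypothetical placeholder: the real entries are not shown in this diff.
section_descriptions = {
    1: "opening scene that sets the mood",
    2: "build-up that introduces the theme",
    3: "main scene with the strongest visuals",
    4: "emotional turning point",
    5: "closing scene that leaves a lasting impression",
}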
@@ -642,23 +641,35 @@ def generate_single_section_prompt(scenario, section_number):
         {"role": "system", "content": system_prompt_scenario},
         {"role": "user", "content": f"""
 For section {section_number} ({section_descriptions[section_number]}) of the script below,
-background video
+please generate a background-video prompt.

+Script:
 {scenario}

-
+Notes:
+1. Reflect a mood and tone that match this section's role ({section_descriptions[section_number]}).
+2. Avoid direct depictions of the product or service; focus on an emotional, artistic background video.
+3. Be sure to include the following structure:
+- Start with one clear sentence describing the main action
+- Describe concrete actions and gestures in chronological order
+- Include specific details of the background and environment
+- Specify camera angles and movement
+- Describe the lighting and colors in detail
+- Naturally include any changes or sudden events"""}
     ]

     try:
         response = client.chat.completions.create(
             model="gpt-4-1106-preview",
             messages=messages,
-            max_tokens=
+            max_tokens=1000,  # increased token count
+            temperature=0.7
         )
-
+        generated_prompt = response.choices[0].message.content.strip()
+        return f"{section_number}. {generated_prompt}"
     except Exception as e:
-        print(f"Error during prompt generation: {e}")
-        return "Error occurred during prompt generation"
+        print(f"Error during prompt generation for section {section_number}: {e}")
+        return f"Error occurred during prompt generation for section {section_number}"


 # Add video merging function
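The new function body assumes a module-level `client` configured elsewhere in app.py (along with the `system_prompt_scenario` string). A minimal sketch of that client setup and an equivalent standalone call, assuming the openai>=1.0 SDK and an OPENAI_API_KEY environment variable; the actual initialization is outside this diff:

import os
from openai import OpenAI

# The SDK reads OPENAI_API_KEY from the environment by default; it is passed
# explicitly here only to make the assumption visible.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

# Standalone call mirroring the parameters added in the hunk above.
response = client.chat.completions.create(
    model="gpt-4-1106-preview",
    messages=[{"role": "user", "content": "Describe a calm seaside scene."}],
    max_tokens=1000,
    temperature=0.7,
)
print(response.choices[0].message.content.strip())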
@@ -996,6 +1007,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:


     # Event handlers
+    # Text to Video Tab handlers
     txt2vid_preset.change(
         fn=preset_changed,
         inputs=[txt2vid_preset],
@@ -1031,6 +1043,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         queue=True,
     )

+    # Image to Video Tab handlers
     img2vid_preset.change(
         fn=preset_changed,
         inputs=[img2vid_preset],
@@ -1067,7 +1080,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         queue=True,
     )

-    # Scenario
+    # Scenario Tab handlers
+    # Script generation button handler
+    generate_script_btn.click(
+        fn=generate_script,
+        inputs=[script_topic],
+        outputs=[scenario_input]
+    )
+
+    # Scenario analysis button handler
     analyze_btn.click(
         fn=analyze_scenario,
         inputs=[scenario_input],
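The Scenario tab uses the same `Button.click(fn, inputs, outputs)` wiring as the other tabs: Gradio calls `fn` with the current values of the `inputs` components and writes its return value into the `outputs` components. A minimal self-contained example of that pattern (generic component names; `echo_scenario` is only a stand-in for `generate_script`):

import gradio as gr

def echo_scenario(topic: str) -> str:
    # Stand-in handler: returns text destined for the scenario textbox.
    return f"Draft scenario about: {topic}"

with gr.Blocks() as demo:
    topic_box = gr.Textbox(label="Topic")
    scenario_box = gr.Textbox(label="Scenario")
    make_btn = gr.Button("Generate script")
    # Clicking the button reads topic_box and writes the result to scenario_box.
    make_btn.click(fn=echo_scenario, inputs=[topic_box], outputs=[scenario_box])

if __name__ == "__main__":
    demo.launch()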
@@ -1077,45 +1098,38 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         ]
     )

-    #
-
-        fn=
-        inputs=[
-        outputs=
-        api_name=f"generate_section1"
+    # Per-section prompt regeneration handlers
+    section1_regenerate.click(
+        fn=lambda x: generate_single_section_prompt(x, 1),
+        inputs=[scenario_input],
+        outputs=section1_prompt
     )

-
-        fn=lambda
-        inputs=[
-        outputs=
-        api_name=f"generate_section2"
+    section2_regenerate.click(
+        fn=lambda x: generate_single_section_prompt(x, 2),
+        inputs=[scenario_input],
+        outputs=section2_prompt
     )

-
-        fn=lambda
-        inputs=[
-        outputs=
-        api_name=f"generate_section3"
+    section3_regenerate.click(
+        fn=lambda x: generate_single_section_prompt(x, 3),
+        inputs=[scenario_input],
+        outputs=section3_prompt
     )

-
-        fn=lambda
-        inputs=[
-        outputs=
-        api_name=f"generate_section4"
+    section4_regenerate.click(
+        fn=lambda x: generate_single_section_prompt(x, 4),
+        inputs=[scenario_input],
+        outputs=section4_prompt
     )

-
-        fn=lambda
-        inputs=[
-        outputs=
-        api_name=f"generate_section5"
+    section5_regenerate.click(
+        fn=lambda x: generate_single_section_prompt(x, 5),
+        inputs=[scenario_input],
+        outputs=section5_prompt
     )

-
-
-    # Section generation event handlers
+    # Per-section video generation handlers
     section1_generate.click(
         fn=lambda p, pr: generate_section_video(p, pr, 1),
         inputs=[section1_prompt, scenario_preset],
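The five regenerate handlers added above differ only in the hard-coded section number. If the buttons and prompt boxes were collected into lists when they are created (an assumption; this diff defines them as individual variables), the same wiring could be written as a loop. `functools.partial` binds the section number at definition time, avoiding the late-binding pitfall of capturing a loop variable in a bare lambda:

from functools import partial

# Assumes these lists exist, e.g.
#   section_regenerate_btns = [section1_regenerate, ..., section5_regenerate]
#   section_prompt_boxes = [section1_prompt, ..., section5_prompt]
for number, (regen_btn, prompt_box) in enumerate(
        zip(section_regenerate_btns, section_prompt_boxes), start=1):
    regen_btn.click(
        # partial(...) freezes the current section number; Gradio then calls
        # the handler as fn(scenario_text).
        fn=partial(generate_single_section_prompt, section_number=number),
        inputs=[scenario_input],
        outputs=prompt_box,
    )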
@@ -1146,8 +1160,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         outputs=section5_video
     )

-
-    # Add event handlers
+    # Merged video generation handler
     merge_videos_btn.click(
         fn=merge_section_videos,
         inputs=[
@@ -1159,13 +1172,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         ],
         outputs=merged_video_output
     )
-
-    # Script generation button event handler
-    generate_script_btn.click(
-        fn=generate_script,
-        inputs=[script_topic],
-        outputs=[scenario_input]
-    )

 if __name__ == "__main__":
     iface.queue(max_size=64, default_concurrency_limit=1, api_open=False).launch(
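`merge_section_videos` is only wired up here; its implementation is not part of this diff. A hypothetical sketch of what such a helper could look like, assuming the five section clips share codec and resolution so that ffmpeg's concat demuxer can join them without re-encoding (the function name is reused for illustration only):

import subprocess
import tempfile

def merge_section_videos(*video_paths: str, output_path: str = "merged.mp4") -> str:
    """Concatenate section clips in order with ffmpeg's concat demuxer."""
    clips = [p for p in video_paths if p]  # skip sections that were never generated
    if not clips:
        raise ValueError("No section videos to merge")
    # The concat demuxer reads a text file that lists the input clips in order.
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as listing:
        for path in clips:
            listing.write(f"file '{path}'\n")
        list_path = listing.name
    subprocess.run(
        ["ffmpeg", "-y", "-f", "concat", "-safe", "0",
         "-i", list_path, "-c", "copy", output_path],
        check=True,
    )
    return output_path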