openfree committed on
Commit
84b6b82
·
verified ·
1 Parent(s): 346d452

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +183 -199
app.py CHANGED
@@ -239,14 +239,7 @@ pipeline = XoraVideoPipeline(
239
  vae=vae,
240
  ).to(device)
241
 
242
- # State λ³€μˆ˜λ“€μ˜ μ΄ˆκΈ°ν™” μˆ˜μ •
243
- txt2vid_current_height = gr.State(value=320)
244
- txt2vid_current_width = gr.State(value=512)
245
- txt2vid_current_num_frames = gr.State(value=257)
246
 
247
- img2vid_current_height = gr.State(value=320)
248
- img2vid_current_width = gr.State(value=512)
249
- img2vid_current_num_frames = gr.State(value=257)
250
 
251
  # Preset options for resolution and frame configuration
252
  # Convert frames to seconds assuming 25 FPS
@@ -279,16 +272,18 @@ preset_options = [
279
  ]
280
 
281
  def preset_changed(preset):
282
- selected = next(item for item in preset_options if item["label"] == preset)
 
 
283
  return [
284
- selected["height"],
285
- selected["width"],
286
- selected["num_frames"],
287
  gr.update(visible=False),
288
  gr.update(visible=False),
289
  gr.update(visible=False),
290
- ]
291
-
292
  def generate_video_from_text(
293
  prompt,
294
  enhance_prompt_toggle,
@@ -849,6 +844,16 @@ def cleanup():
849
 
850
  # Gradio Interface Definition
851
  with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as iface:
 
 
 
 
 
 
 
 
 
 
852
  with gr.Tabs():
853
  # Text to Video Tab
854
  with gr.TabItem("ν…μŠ€νŠΈλ‘œ λΉ„λ””μ˜€ λ§Œλ“€κΈ°"):
@@ -1052,192 +1057,171 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as iface:
1052
  merged_video_output = gr.Video(label="톡합 μ˜μƒ")
1053
 
1054
 
1055
-
1056
-
1057
- # Event handlers
1058
- # Text to Video Tab handlers
1059
- txt2vid_preset.change(
1060
- fn=preset_changed,
1061
- inputs=[txt2vid_preset],
1062
- outputs=[
1063
- txt2vid_current_height,
1064
- txt2vid_current_width,
1065
- txt2vid_current_num_frames,
1066
- *txt2vid_advanced[3:]
1067
- ]
1068
- )
1069
-
1070
- txt2vid_enhance_toggle.change(
1071
- fn=update_prompt_t2v,
1072
- inputs=[txt2vid_prompt, txt2vid_enhance_toggle],
1073
- outputs=txt2vid_prompt
1074
- )
1075
- # Event handlers μˆ˜μ •
1076
- # Text to Video Tab handlers
1077
- txt2vid_generate.click(
1078
- fn=generate_video_from_text,
1079
- inputs=[
1080
- txt2vid_prompt, # ν…μŠ€νŠΈ μž…λ ₯
1081
- txt2vid_enhance_toggle, # ν”„λ‘¬ν”„νŠΈ κ°œμ„  ν† κΈ€
1082
- txt2vid_negative_prompt, # λ„€κ±°ν‹°λΈŒ ν”„λ‘¬ν”„νŠΈ
1083
- txt2vid_frame_rate, # ν”„λ ˆμž„ 레이트
1084
- *txt2vid_advanced[:3], # seed, inference_steps, guidance_scale
1085
- txt2vid_current_height, # height
1086
- txt2vid_current_width, # width
1087
- txt2vid_current_num_frames, # num_frames
1088
- ],
1089
- outputs=txt2vid_output,
1090
- api_name="generate_text_to_video"
1091
- )
1092
-
1093
- # Image to Video Tab handlers
1094
- img2vid_generate.click(
1095
- fn=generate_video_from_image,
1096
- inputs=[
1097
- img2vid_image, # μž…λ ₯ 이미지
1098
- img2vid_prompt, # ν…μŠ€νŠΈ μž…λ ₯
1099
- img2vid_enhance_toggle, # ν”„λ‘¬ν”„νŠΈ κ°œμ„  ν† κΈ€
1100
- img2vid_negative_prompt, # λ„€κ±°ν‹°λΈŒ ν”„λ‘¬ν”„νŠΈ
1101
- img2vid_frame_rate, # ν”„λ ˆμž„ 레이트
1102
- *img2vid_advanced[:3], # seed, inference_steps, guidance_scale
1103
- img2vid_current_height, # height
1104
- img2vid_current_width, # width
1105
- img2vid_current_num_frames, # num_frames
1106
- ],
1107
- outputs=img2vid_output,
1108
- api_name="generate_image_to_video"
1109
- )
1110
-
1111
-
1112
- img2vid_enhance_toggle.change(
1113
- fn=update_prompt_i2v,
1114
- inputs=[img2vid_prompt, img2vid_enhance_toggle],
1115
- outputs=img2vid_prompt
1116
- )
1117
-
1118
- img2vid_preset.change(
1119
- fn=preset_changed,
1120
- inputs=[img2vid_preset],
1121
- outputs=[
1122
- img2vid_current_height,
1123
- img2vid_current_width,
1124
- img2vid_current_num_frames,
1125
- *img2vid_advanced[3:]
1126
- ]
1127
- )
1128
-
1129
-
1130
- img2vid_generate.click(
1131
- fn=generate_video_from_image,
1132
- inputs=[
1133
- img2vid_image,
1134
- img2vid_prompt,
1135
- img2vid_enhance_toggle,
1136
- img2vid_negative_prompt,
1137
- img2vid_frame_rate,
1138
- *img2vid_advanced[:3],
1139
- img2vid_current_height,
1140
- img2vid_current_width,
1141
- img2vid_current_num_frames,
1142
- ],
1143
- outputs=img2vid_output,
1144
- concurrency_limit=1,
1145
- concurrency_id="generate_video",
1146
- queue=True,
1147
- )
1148
-
1149
- # Scenario Tab handlers
1150
- # 슀크립트 생성 λ²„νŠΌ ν•Έλ“€λŸ¬
1151
- generate_script_btn.click(
1152
- fn=generate_script,
1153
- inputs=[script_topic],
1154
- outputs=[scenario_input]
1155
- )
1156
-
1157
- # μ‹œλ‚˜λ¦¬μ˜€ 뢄석 λ²„νŠΌ ν•Έλ“€λŸ¬
1158
- analyze_btn.click(
1159
- fn=analyze_scenario,
1160
- inputs=[scenario_input],
1161
- outputs=[
1162
- section1_prompt, section2_prompt, section3_prompt,
1163
- section4_prompt, section5_prompt
1164
- ]
1165
- )
1166
-
1167
- # μ„Ήμ…˜λ³„ ν”„λ‘¬ν”„νŠΈ μž¬μƒμ„± ν•Έλ“€λŸ¬
1168
- section1_regenerate.click(
1169
- fn=lambda x: generate_single_section_prompt(x, 1),
1170
- inputs=[scenario_input],
1171
- outputs=section1_prompt
1172
- )
1173
-
1174
- section2_regenerate.click(
1175
- fn=lambda x: generate_single_section_prompt(x, 2),
1176
- inputs=[scenario_input],
1177
- outputs=section2_prompt
1178
- )
1179
-
1180
- section3_regenerate.click(
1181
- fn=lambda x: generate_single_section_prompt(x, 3),
1182
- inputs=[scenario_input],
1183
- outputs=section3_prompt
1184
- )
1185
-
1186
- section4_regenerate.click(
1187
- fn=lambda x: generate_single_section_prompt(x, 4),
1188
- inputs=[scenario_input],
1189
- outputs=section4_prompt
1190
- )
1191
-
1192
- section5_regenerate.click(
1193
- fn=lambda x: generate_single_section_prompt(x, 5),
1194
- inputs=[scenario_input],
1195
- outputs=section5_prompt
1196
- )
1197
-
1198
- # μ„Ήμ…˜λ³„ λΉ„λ””μ˜€ 생성 ν•Έλ“€λŸ¬
1199
- section1_generate.click(
1200
- fn=lambda p, pr: generate_section_video(p, pr, 1),
1201
- inputs=[section1_prompt, scenario_preset],
1202
- outputs=section1_video
1203
- )
1204
-
1205
- section2_generate.click(
1206
- fn=lambda p, pr: generate_section_video(p, pr, 2),
1207
- inputs=[section2_prompt, scenario_preset],
1208
- outputs=section2_video
1209
- )
1210
-
1211
- section3_generate.click(
1212
- fn=lambda p, pr: generate_section_video(p, pr, 3),
1213
- inputs=[section3_prompt, scenario_preset],
1214
- outputs=section3_video
1215
- )
1216
-
1217
- section4_generate.click(
1218
- fn=lambda p, pr: generate_section_video(p, pr, 4),
1219
- inputs=[section4_prompt, scenario_preset],
1220
- outputs=section4_video
1221
- )
1222
-
1223
- section5_generate.click(
1224
- fn=lambda p, pr: generate_section_video(p, pr, 5),
1225
- inputs=[section5_prompt, scenario_preset],
1226
- outputs=section5_video
1227
- )
1228
-
1229
- # 톡합 μ˜μƒ 생성 ν•Έλ“€λŸ¬
1230
- merge_videos_btn.click(
1231
- fn=merge_section_videos,
1232
- inputs=[
1233
- section1_video,
1234
- section2_video,
1235
- section3_video,
1236
- section4_video,
1237
- section5_video
1238
- ],
1239
- outputs=merged_video_output
1240
- )
1241
 
1242
  if __name__ == "__main__":
1243
  iface.queue(max_size=64, default_concurrency_limit=1, api_open=False).launch(
 
239
  vae=vae,
240
  ).to(device)
241
 
 
 
 
 
242
 
 
 
 
243
 
244
  # Preset options for resolution and frame configuration
245
  # Convert frames to seconds assuming 25 FPS
 
272
  ]
273
 
274
  def preset_changed(preset):
275
+ selected = next((item for item in preset_options if item["label"] == preset), None)
276
+ if selected is None:
277
+ raise gr.Error("Invalid preset selected")
278
  return [
279
+ gr.State(value=selected["height"]),
280
+ gr.State(value=selected["width"]),
281
+ gr.State(value=selected["num_frames"]),
282
  gr.update(visible=False),
283
  gr.update(visible=False),
284
  gr.update(visible=False),
285
+ ]
286
+
287
  def generate_video_from_text(
288
  prompt,
289
  enhance_prompt_toggle,
 
844
 
845
  # Gradio Interface Definition
846
  with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as iface:
847
+
848
+ # State λ³€μˆ˜λ“€μ˜ μ΄ˆκΈ°ν™” μˆ˜μ •
849
+ txt2vid_current_height = gr.State(value=320)
850
+ txt2vid_current_width = gr.State(value=512)
851
+ txt2vid_current_num_frames = gr.State(value=257)
852
+
853
+ img2vid_current_height = gr.State(value=320)
854
+ img2vid_current_width = gr.State(value=512)
855
+ img2vid_current_num_frames = gr.State(value=257)
856
+
857
  with gr.Tabs():
858
  # Text to Video Tab
859
  with gr.TabItem("ν…μŠ€νŠΈλ‘œ λΉ„λ””μ˜€ λ§Œλ“€κΈ°"):
 
1057
  merged_video_output = gr.Video(label="톡합 μ˜μƒ")
1058
 
1059
 
1060
+ # Text to Video Tab handlers
1061
+ txt2vid_preset.change(
1062
+ fn=preset_changed,
1063
+ inputs=[txt2vid_preset],
1064
+ outputs=[
1065
+ txt2vid_current_height,
1066
+ txt2vid_current_width,
1067
+ txt2vid_current_num_frames,
1068
+ txt2vid_advanced[3], # height_slider
1069
+ txt2vid_advanced[4], # width_slider
1070
+ txt2vid_advanced[5], # num_frames_slider
1071
+ ]
1072
+ )
1073
+
1074
+ txt2vid_enhance_toggle.change(
1075
+ fn=update_prompt_t2v,
1076
+ inputs=[txt2vid_prompt, txt2vid_enhance_toggle],
1077
+ outputs=txt2vid_prompt
1078
+ )
1079
+
1080
+ txt2vid_generate.click(
1081
+ fn=generate_video_from_text,
1082
+ inputs=[
1083
+ txt2vid_prompt,
1084
+ txt2vid_enhance_toggle,
1085
+ txt2vid_negative_prompt,
1086
+ txt2vid_frame_rate,
1087
+ txt2vid_advanced[0], # seed
1088
+ txt2vid_advanced[1], # inference_steps
1089
+ txt2vid_advanced[2], # guidance_scale
1090
+ txt2vid_current_height,
1091
+ txt2vid_current_width,
1092
+ txt2vid_current_num_frames,
1093
+ ],
1094
+ outputs=txt2vid_output,
1095
+ )
1096
+
1097
+ # Image to Video Tab handlers
1098
+ img2vid_preset.change(
1099
+ fn=preset_changed,
1100
+ inputs=[img2vid_preset],
1101
+ outputs=[
1102
+ img2vid_current_height,
1103
+ img2vid_current_width,
1104
+ img2vid_current_num_frames,
1105
+ img2vid_advanced[3], # height_slider
1106
+ img2vid_advanced[4], # width_slider
1107
+ img2vid_advanced[5], # num_frames_slider
1108
+ ]
1109
+ )
1110
+
1111
+ img2vid_enhance_toggle.change(
1112
+ fn=update_prompt_i2v,
1113
+ inputs=[img2vid_prompt, img2vid_enhance_toggle],
1114
+ outputs=img2vid_prompt
1115
+ )
1116
+
1117
+ img2vid_generate.click(
1118
+ fn=generate_video_from_image,
1119
+ inputs=[
1120
+ img2vid_image,
1121
+ img2vid_prompt,
1122
+ img2vid_enhance_toggle,
1123
+ img2vid_negative_prompt,
1124
+ img2vid_frame_rate,
1125
+ img2vid_advanced[0], # seed
1126
+ img2vid_advanced[1], # inference_steps
1127
+ img2vid_advanced[2], # guidance_scale
1128
+ img2vid_current_height,
1129
+ img2vid_current_width,
1130
+ img2vid_current_num_frames,
1131
+ ],
1132
+ outputs=img2vid_output,
1133
+ )
1134
+
1135
+ # Scenario Tab handlers
1136
+ generate_script_btn.click(
1137
+ fn=generate_script,
1138
+ inputs=[script_topic],
1139
+ outputs=[scenario_input]
1140
+ )
1141
+
1142
+ analyze_btn.click(
1143
+ fn=analyze_scenario,
1144
+ inputs=[scenario_input],
1145
+ outputs=[
1146
+ section1_prompt, section2_prompt, section3_prompt,
1147
+ section4_prompt, section5_prompt
1148
+ ]
1149
+ )
1150
+
1151
+ # μ„Ήμ…˜λ³„ ν”„λ‘¬ν”„νŠΈ μž¬μƒμ„± ν•Έλ“€λŸ¬
1152
+ section1_regenerate.click(
1153
+ fn=lambda x: generate_single_section_prompt(x, 1),
1154
+ inputs=[scenario_input],
1155
+ outputs=section1_prompt
1156
+ )
1157
+
1158
+ section2_regenerate.click(
1159
+ fn=lambda x: generate_single_section_prompt(x, 2),
1160
+ inputs=[scenario_input],
1161
+ outputs=section2_prompt
1162
+ )
1163
+
1164
+ section3_regenerate.click(
1165
+ fn=lambda x: generate_single_section_prompt(x, 3),
1166
+ inputs=[scenario_input],
1167
+ outputs=section3_prompt
1168
+ )
1169
+
1170
+ section4_regenerate.click(
1171
+ fn=lambda x: generate_single_section_prompt(x, 4),
1172
+ inputs=[scenario_input],
1173
+ outputs=section4_prompt
1174
+ )
1175
+
1176
+ section5_regenerate.click(
1177
+ fn=lambda x: generate_single_section_prompt(x, 5),
1178
+ inputs=[scenario_input],
1179
+ outputs=section5_prompt
1180
+ )
1181
+
1182
+ # μ„Ήμ…˜λ³„ λΉ„λ””μ˜€ 생성 ν•Έλ“€λŸ¬
1183
+ section1_generate.click(
1184
+ fn=lambda p, pr: generate_section_video(p, pr, 1),
1185
+ inputs=[section1_prompt, scenario_preset],
1186
+ outputs=section1_video
1187
+ )
1188
+
1189
+ section2_generate.click(
1190
+ fn=lambda p, pr: generate_section_video(p, pr, 2),
1191
+ inputs=[section2_prompt, scenario_preset],
1192
+ outputs=section2_video
1193
+ )
1194
+
1195
+ section3_generate.click(
1196
+ fn=lambda p, pr: generate_section_video(p, pr, 3),
1197
+ inputs=[section3_prompt, scenario_preset],
1198
+ outputs=section3_video
1199
+ )
1200
+
1201
+ section4_generate.click(
1202
+ fn=lambda p, pr: generate_section_video(p, pr, 4),
1203
+ inputs=[section4_prompt, scenario_preset],
1204
+ outputs=section4_video
1205
+ )
1206
+
1207
+ section5_generate.click(
1208
+ fn=lambda p, pr: generate_section_video(p, pr, 5),
1209
+ inputs=[section5_prompt, scenario_preset],
1210
+ outputs=section5_video
1211
+ )
1212
+
1213
+ # 톡합 μ˜μƒ 생성 ν•Έλ“€λŸ¬
1214
+ merge_videos_btn.click(
1215
+ fn=merge_section_videos,
1216
+ inputs=[
1217
+ section1_video,
1218
+ section2_video,
1219
+ section3_video,
1220
+ section4_video,
1221
+ section5_video
1222
+ ],
1223
+ outputs=merged_video_output
1224
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1225
 
1226
  if __name__ == "__main__":
1227
  iface.queue(max_size=64, default_concurrency_limit=1, api_open=False).launch(