Update app.py
app.py CHANGED
@@ -1,3 +1,5 @@
+
+
 import gradio as gr
 from deep_translator import GoogleTranslator
 import os
@@ -6,11 +8,8 @@ import subprocess
 from PIL import Image, ImageDraw, ImageFont
 import re
 import uuid
-# Path to the ZIP file
 zip_file = "./fonts.zip"
-# Destination folder
 extract_to = "./fonts/"
-# Extract the ZIP file
 shutil.unpack_archive(zip_file, extract_to, 'zip')
 
 # Generate unique filename for the video
@@ -138,11 +137,12 @@ def get_video_duration(video_file):
     else:
         raise ValueError("Could not extract video duration.")
 
+import json
 
 
 
 # Generate video function
-def generate_video(input_text, language_set, font_size, theme, canvas_size):
+def generate_video(input_text, language_set, font_size, theme, canvas_size,llm_translation=None):
     width, height = map(int, canvas_size.split('x'))
     theme_colors = {"Black Background": ("#FFFFFF", "#000000"), "White Background": ("#000000", "#FFFFFF")}
     text_color, bg_color = theme_colors[theme]
@@ -165,19 +165,31 @@ def generate_video(input_text, language_set, font_size, theme, canvas_size):
     FRAMES_PER_IMAGE = round(FPS * DURATION_PER_IMAGE) # Frames needed per image
     extra_frames = FRAMES_PER_IMAGE+10 # Extra frames for the last image to allow fade-out effect
     frame_index = 0 # Start numbering frames
+    if llm_translation:
+        data = json.loads(llm_translation)
+        language_list = list(data.keys())
+        for i, lang in enumerate(language_list):
+            translated_text = data[lang]
+            img_path = create_image(translated_text, lang, font_size, text_color, bg_color, width, height, output_folder)
+            frame_count = FRAMES_PER_IMAGE + extra_frames if i == len(language_list) - 1 else FRAMES_PER_IMAGE
+            # Duplicate frames for smooth video
+            for _ in range(frame_count):
+                frame_filename = os.path.join(frames, f"{frame_index:05d}.png")
+                shutil.copy(img_path, frame_filename)
+                frame_index += 1
+    else:
+        for i, lang in enumerate(language_list):
+            translated_text = translate_text(input_text, lang) if lang != 'en' else input_text
+            img_path = create_image(translated_text, lang, font_size, text_color, bg_color, width, height, output_folder)
 
-    for i, lang in enumerate(language_list):
-        translated_text = translate_text(input_text, lang) if lang != 'en' else input_text
-        img_path = create_image(translated_text, lang, font_size, text_color, bg_color, width, height, output_folder)
-
-        # Check if it's the last image
-        frame_count = FRAMES_PER_IMAGE + extra_frames if i == len(language_list) - 1 else FRAMES_PER_IMAGE
+            # Check if it's the last image
+            frame_count = FRAMES_PER_IMAGE + extra_frames if i == len(language_list) - 1 else FRAMES_PER_IMAGE
 
-        # Duplicate frames for smooth video
-        for _ in range(frame_count):
-            frame_filename = os.path.join(frames, f"{frame_index:05d}.png")
-            shutil.copy(img_path, frame_filename)
-            frame_index += 1
+            # Duplicate frames for smooth video
+            for _ in range(frame_count):
+                frame_filename = os.path.join(frames, f"{frame_index:05d}.png")
+                shutil.copy(img_path, frame_filename)
+                frame_index += 1
 
     # Generate video using FFmpeg
     output_video = "multi_language_video.mp4"
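Note on the branch added above: llm_translation is expected to hold a JSON object keyed by language code, presumably the LLM's answer to the prompt produced in the new "LLM Prompt" tab. A minimal sketch of such a payload and of how the added code consumes it follows; the sample codes and strings are illustrative, not taken from the commit.

import json

# Hypothetical text pasted into the "LLM Translation" textbox;
# keys are language codes, values are already-translated strings.
llm_translation = '{"en": "Hello", "hi": "नमस्ते", "es": "Hola"}'

data = json.loads(llm_translation)   # dict: language code -> translation
language_list = list(data.keys())    # drives the per-language image loop
for lang in language_list:
    translated_text = data[lang]     # used in place of translate_text()
    print(lang, translated_text)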
@@ -240,13 +252,14 @@ def ui():
             with gr.Accordion('🖋️ Text Style', open=False):
                 font_size = gr.Slider(20, 200, value=100, step=1, label="📏 Font Size") # FIXED (Removed comma)
                 theme = gr.Radio(["Black Background", "White Background"], label="🎨 Theme", value="Black Background")
+                llm_translation = gr.Textbox(label='🌐 LLM Translation', lines=5, placeholder="Enter a word...",value=None)
 
         with gr.Column():
             download_video = gr.File(label="📥 Download Video")
             play_video = gr.Video(label="🎬 Generated Video")
 
     # Define Inputs and Outputs
-    input_list = [input_text, language_set, font_size, theme, canvas_size]
+    input_list = [input_text, language_set, font_size, theme, canvas_size,llm_translation]
     output_list = [download_video, play_video]
 
     # Bind Functions to UI Elements
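One detail worth noting: when the new "LLM Translation" textbox is left empty, Gradio hands generate_video an empty or None value, both falsy, so the original GoogleTranslator path still runs. A small sketch of that dispatch, with illustrative names rather than the commit's code:

def choose_branch(llm_translation=None):
    # Mirrors the `if llm_translation:` guard added to generate_video:
    # "" and None both skip the JSON branch.
    if llm_translation:
        return "JSON branch"
    return "GoogleTranslator branch"

assert choose_branch("") == "GoogleTranslator branch"
assert choose_branch(None) == "GoogleTranslator branch"
assert choose_branch('{"en": "Hello"}') == "JSON branch"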
@@ -258,9 +271,59 @@ def ui():
 
     return demo
 
+
+def prompt_maker(text, language_set="Foreign Languages"):
+    languages = {
+        "Foreign Languages": {
+            "en": "English", "zh-CN": "Mandarin Chinese", "hi": "Hindi", "es": "Spanish",
+            "fr": "French", "ar": "Standard Arabic", "bn": "Bengali", "pt": "Portuguese",
+            "ru": "Russian", "ur": "Urdu", "id": "Indonesian", "de": "German", "ja": "Japanese",
+            "pa": "Punjabi", "te": "Telugu", "tr": "Turkish", "ta": "Tamil", "vi": "Vietnamese", "ko": "Korean"
+        },
+        "Local Indian Languages": {
+            "en": "English","hi": "Hindi", "bn": "Bengali", "mr": "Marathi", "te": "Telugu", "ta": "Tamil",
+            "gu": "Gujarati", "ur": "Urdu", "kn": "Kannada", "or": "Odia", "pa": "Punjabi", "ml": "Malayalam",
+            "mai": "Maithili","ne": "Nepali","sa": "Sanskrit","doi": "Dogri","sd": "Sindhi"
+
+        }
+    }
+
+    selected_languages = languages.get(language_set, languages["Foreign Languages"])
+
+    prompt = f'Translate this English word "{text}" to different languages in JSON format. Only write the JSON text:\n'
+    prompt += "{\n"
+    prompt += ",\n".join([f' "{code}": ""' for code in selected_languages])
+    prompt += "\n}"
+
+    return prompt
+
+def ui2():
+    with gr.Blocks() as demo:
+        gr.Markdown("## 🌐 LLM Translation")
+
+        with gr.Row():
+            with gr.Column():
+                input_text = gr.Textbox(label='📝 Enter a Word', lines=1, placeholder="Enter a word...")
+                language_set = gr.Radio(
+                    ["Foreign Languages", "Local Indian Languages"],
+                    label="🌍 Language Set",
+                    value="Foreign Languages"
+                )
+                generate_btn = gr.Button('🚀 Generate', variant='primary')
+
+            with gr.Column():
+                output_text = gr.Textbox(label='📋 Generated Prompt', lines=5)
+
+        generate_btn.click(prompt_maker, inputs=[input_text, language_set], outputs=output_text)
+
+    return demo
+
+
 # Launch the app
-def main(share=False, debug=
-    demo = ui()
+def main(share=False, debug=True):
+    demo1 = ui()
+    demo2 = ui2()
+    demo=gr.TabbedInterface([demo1, demo2], ["Video Generation", "LLM Prompt"])
     demo.queue().launch(debug=debug, share=share)
 
 main()
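For reference, a usage sketch of the new prompt_maker defined in the hunk above; the word is a made-up example, and the expected output shape follows directly from the function body.

# Assumes prompt_maker from the diff above is in scope.
print(prompt_maker("waterfall", "Local Indian Languages"))

# Expected shape of the returned prompt (one empty slot per code in the
# selected language set, ending with "sd"):
#
# Translate this English word "waterfall" to different languages in JSON format. Only write the JSON text:
# {
#  "en": "",
#  "hi": "",
#  ...
#  "sd": ""
# }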