adeeb-khoja committed (verified)
Commit 6d31a7a
1 Parent(s): bede7da

init commit

Files changed (1)
  1. app.py +156 -0
app.py ADDED
@@ -0,0 +1,156 @@
+ import json
+
+ from extract_audio import VideoHelper
+ from helpers.srt_generator import SRTGenerator
+ from moderator import DetoxifyModerator
+ from shorts_generator import ShortsGenerator
+ from subtitles import SubtitlesRenderer
+ from transcript_detect import *
+ from translation import *
+ import gradio as gr
+ from dotenv import load_dotenv
+
+
+ def translate_segments(segments, translator: TranslationModel, from_lang, to_lang):
+     # Translate each transcript segment while keeping its original timing and id
+     translated_segments = []
+     for segment in segments:
+         translated_segment_text = translator.translate_text(segment['text'], from_lang, to_lang)
+         translated_segments.append({'text': translated_segment_text, 'start': segment['start'], 'end': segment['end'], 'id': segment['id']})
+
+     return translated_segments
+
+
+ def main(file, translate_to_lang):
+
+     # Extracting the audio from video
+     video_file_path = file
+     audio_file_path = 'extracted_audio.mp3'
+     video_helper = VideoHelper()
+     print('Extracting audio from video...')
+     video_helper.extract_audio(video_file_path, audio_file_path)
+
+
+     # Transcribe the audio with Whisper and collect text, language, and segments
+     whisper_model = WhisperModel('base')
+
+     print('Transcribing audio file...')
+     transcription = whisper_model.transcribe_audio(audio_file_path)
+
+     print('Generating transcript text...')
+     transcript_text = whisper_model.get_text(transcription)
+
+     print('Detecting audio language...')
+     detected_language = whisper_model.get_detected_language(transcription)
+
+     print('Generating transcript segments...')
+     transcript_segments = whisper_model.get_segments(transcription)
+
+
+     # Write the transcription to a text file
+     print('Writing transcript into text file...')
+     transcript_file_path = "transcript.txt"
+     with open(transcript_file_path, "w", encoding="utf-8") as f:
+         f.write(transcript_text)
+
+     # Translate transcript
+     translation_model = TranslationModel()
+     target_language = supported_languages[translate_to_lang]
+
+     print(f'Translating transcript text from {detected_language} to {target_language}...')
+     translated_text = translation_model.translate_text(transcript_text, detected_language, target_language)
+
+     # print(f'Translating transcript segments from {detected_language} to {target_language}...')
+     # translated_segments = translate_segments(transcript_segments, translation_model, detected_language, target_language)
+
+     # Write the translation to a text file
+     print('Writing translation text file...')
+     translation_file_path = "translation.txt"
+     with open(translation_file_path, "w", encoding="utf-8") as f:
+         f.write(translated_text)
+
+     print('Writing transcript segments to JSON file...')
+     segments_file_path = "segments.json"
+     with open(segments_file_path, "w", encoding="utf-8") as f:
+         json.dump(transcript_segments, f, ensure_ascii=False)
+
+     # print('Writing translated segments to JSON file...')
+     # translated_segments_file_path = "translated_segments.json"
+     # with open(translated_segments_file_path, "w", encoding="utf-8") as f:
+     #     json.dump(translated_segments, f, ensure_ascii=False)
+
+     # Run moderator to detect toxicity
+     print('Analyzing and detecting toxicity levels...')
+     detoxify_moderator = DetoxifyModerator()
+     result = detoxify_moderator.detect_toxicity(transcript_text)
+     df = detoxify_moderator.format_results(result)
+
+
+     # Render subtitles on video
+     renderer = SubtitlesRenderer()
+     subtitles_file_path = 'segments.json'
+     output_file_path = 'subtitled_video.mp4'
+     subtitled_video = renderer.add_subtitles(video_file_path, subtitles_file_path, output_file_path)
+
+
+
+     # Generate short videos from the full video
+     output_srt_file = 'subtitles.srt'
+     print('Generating SRT file...')
+     # Generate SRT file from the transcript segments
+     SRTGenerator.generate_srt(transcript_segments, output_srt_file)
+     shorts_generator = ShortsGenerator()
+     print('Generating shorts from important scenes...')
+     selected_scenes = shorts_generator.execute(output_srt_file)
+     shorts_path_list = shorts_generator.extract_video_scenes(video_file_path, shorts_generator.extract_scenes(selected_scenes.content))
+
+     # Pad the list so exactly three short outputs are always returned
+     return_shorts_list = shorts_path_list + [""] * (3 - len(shorts_path_list))
+
+     return transcript_text, translated_text, df, subtitled_video, return_shorts_list[0], return_shorts_list[1], return_shorts_list[2]
+
+
+
+ def interface_function(file, translate_to_lang, with_transcript=False, with_translations=False, with_subtitles=False, with_shorts=False):
+     # The checkbox flags are currently unused; the full pipeline always runs
+     return main(file, translate_to_lang)
+
+ supported_languages = {
+     "Spanish": "es",
+     "French": "fr",
+     "German": "de",
+     "Russian": "ru",
+     "Arabic": "ar",
+     "Hindi": "hi"
+ }
+
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+
+ # Build the Gradio interface: video input, target language, and feature checkboxes
+ inputs = [gr.Video(label='Content Video'), gr.Dropdown(list(supported_languages.keys()), label="Target Language"), gr.Checkbox(label="Generate Transcript"),
+           gr.Checkbox(label="Translate Transcript"), gr.Checkbox(label="Generate Subtitles"), gr.Checkbox(label="Generate Shorts")]
+
+ outputs = [gr.Textbox(label="Transcript"), gr.Textbox(label="Translation"), gr.DataFrame(label="Moderation Results"), gr.Video(label='Output Video with Subtitles')]
+ short_outputs = [gr.Video(label=f"Short {i+1}") for i in range(3)]
+ outputs.extend(short_outputs)
+ demo = gr.Interface(
+     fn=interface_function,
+     inputs=inputs,
+     outputs=outputs,
+     title="Rosetta AI",
+     description="Content Creation Customization"
+ )
+ # with gr.Blocks() as demo:
+ #     file_output = gr.File()
+ #     upload_button = gr.UploadButton("Click to Upload a Video", file_types=["video"], file_count="single")
+ #     upload_button.upload(main, upload_button, ['text','text'])
+
+
+
+ demo.launch()
+