mrfakename committed (verified)
Commit 4439915 · Parent(s): 442c923

Update app.py

Files changed (1): app.py +33 -69
app.py CHANGED
@@ -1,73 +1,37 @@
- # Copyright (c) 2024, please contact before redistribution/modification
-
- FONT_URL = 'https://fonts.gstatic.com/s/inter/v13/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfAZ9hjQ.ttf'
- from moviepy.editor import *
- import whisper
- from cached_path import cached_path
- from moviepy.video.tools.subtitles import SubtitlesClip
- import spaces
- import torch
- import tempfile
  import gradio as gr
- mdl = whisper.load_model("base")
- if torch.cuda.is_available(): mdl.to('cuda')
- @spaces.GPU(enable_queue=True)
- def subtitle(input):
-     status = "**Starting...**"
-     yield status, gr.update()
-     gr.Info("Transcribing...")
-     status += "\n\n[1/5] Transcribing... (may take a while)"
-     yield status, gr.update()
-     transcript = mdl.transcribe(
-         word_timestamps=True,
-         audio=input
-     )
-     status += "\n\n[2/5] Processing subtitles..."
-     yield status, gr.update()
-     gr.Info("Processing subtitles...")
-     subs = []
-     for segment in transcript['segments']:
-         for word in segment['words']:
-             subs.append(((word['start'], word['end'],), word['word'].strip(),))
-     status += "\n\n[3/5] Loading video..."
-     yield status, gr.update()
-     gr.Info("Loading video...")
-     video = VideoFileClip(input)
-     width, height = video.size
-     gr.Info(width)
-     generator = lambda txt: TextClip(txt, size=(width * (3 / 4) + 8, None), color='white', stroke_color='black', stroke_width=8, method='caption', fontsize=min(width / 7, height / 7), font=str(cached_path(FONT_URL)))
-     generator1 = lambda txt: TextClip(txt, size=(width * (3 / 4), None), color='white', method='caption', fontsize=min(width / 7, height / 7), font=str(cached_path(FONT_URL)))
-     status += "\n\n[4/5] Loading video clip..."
-     yield status, gr.update()
-     gr.Info("Loading video clip...")
-     subtitles = SubtitlesClip(subs, generator)
-     subtitles2 = SubtitlesClip(subs, generator1)
-     result_1 = CompositeVideoClip([video, subtitles.set_pos(('center','center'))])
-     result = CompositeVideoClip([result_1, subtitles2.set_pos(('center','center'))])
-     status += "\n\n[5/5] Writing video... (may take a while)"
-     yield status, gr.update()
-     gr.Info("Writing video...")
-     with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as f:
-         result.write_videofile(f.name, codec='h264_videotoolbox', audio_codec='aac', threads=64)
-     status += "\n\n**Done!**"
-     yield status, f.name
-     return
-
- with gr.Blocks() as demo:
-     gr.Markdown("""
-     # AutoSubs
-
-     Automatically add on-screen subtitles to your videos.
-
-     **NOTE:** Uploading copyrighted/NSFW content to this service is strictly prohibited.
-
-     The maximum length of video is 15 minutes. This service probably won't work well on non-English videos.
-
-     Powered by OAI Whisper & MoviePy!
-     """)
-     status = gr.Markdown("**Status updates will appear here.**")
-     vid_inp = gr.Video(interactive=True, label="Upload or record video", max_length=900)
-     go_btn = gr.Button("Transcribe!", variant="primary")
-     vid_out = gr.Video(interactive=False, label="Result")
-     go_btn.click(subtitle, inputs=[vid_inp], outputs=[status, vid_out])
- demo.queue(api_open=False).launch(show_api=False)
+ import torch, spaces
  import gradio as gr
+ from diffusers import FluxPipeline
+
+ # Display name -> Hugging Face repo id
+ MODELS = {
+     'FLUX.1 [dev]': 'black-forest-labs/FLUX.1-dev',
+     'FLUX.1 [schnell]': 'black-forest-labs/FLUX.1-schnell',
+     'OpenFLUX.1': 'ostris/OpenFLUX.1',
+ }
+ MODEL_CACHE = {}
+ for name, repo in MODELS.items():
+     print(f"Loading model {repo}...")
+     MODEL_CACHE[name] = FluxPipeline.from_pretrained(repo, torch_dtype=torch.bfloat16)
+     # Save some VRAM by offloading each model to the CPU; remove this if you have enough GPU memory.
+     MODEL_CACHE[name].enable_model_cpu_offload()
+     print(f"Loaded model {repo}")
+
+ @spaces.GPU
+ def generate(prompt):
+     # e.g. "A cat holding a sign that says hello world"
+     image = MODEL_CACHE['OpenFLUX.1'](
+         prompt,
+         height=1024,
+         width=1024,
+         guidance_scale=3.5,
+         num_inference_steps=50,
+         max_sequence_length=512,
+         generator=torch.Generator("cpu").manual_seed(0)
+     ).images[0]
+     return image
+     # image.save("flux-dev.png")
+
+ with gr.Blocks() as demo:
+     prompt = gr.Textbox(label="Prompt")
+     btn = gr.Button("Generate", variant="primary")
+     out = gr.Image(label="Generated image", interactive=False)
+     btn.click(generate, inputs=prompt, outputs=out)
+ demo.queue().launch()
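
Note: the loop above caches all three pipelines, but generate only ever calls MODEL_CACHE['OpenFLUX.1'], so the dev and schnell checkpoints are loaded (three bf16 FLUX pipelines is a lot of memory, even with CPU offload) and never used. Below is a minimal sketch of exposing the cached models in the UI, assuming a gr.Dropdown over the MODELS display names; the model_name parameter and dropdown wiring are illustrative, not part of this commit.

@spaces.GPU
def generate(prompt, model_name):
    # Look up whichever pipeline the user picked instead of hardcoding one.
    pipe = MODEL_CACHE[model_name]
    return pipe(
        prompt,
        height=1024,
        width=1024,
        guidance_scale=3.5,
        num_inference_steps=50,
        max_sequence_length=512,
        generator=torch.Generator("cpu").manual_seed(0)
    ).images[0]

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    model = gr.Dropdown(choices=list(MODELS), value='OpenFLUX.1', label="Model")
    btn = gr.Button("Generate", variant="primary")
    out = gr.Image(label="Generated image", interactive=False)
    btn.click(generate, inputs=[prompt, model], outputs=out)
demo.queue().launch()

Alternatively, loading only the selected model on demand (and evicting the previous one) would keep memory closer to a single pipeline at the cost of a reload delay when switching.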