osanseviero commited on
Commit
b498e00
·
1 Parent(s): 0828982

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -54
app.py CHANGED
@@ -9,13 +9,8 @@ import os
9
  import sys
10
  from huggingface_hub import snapshot_download
11
 
12
- # 1. GPT-J: Story Generation Pipeline
13
- story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
14
-
15
- # 2. LatentDiffusion: Latent Diffusion Interface
16
  image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
17
 
18
- # 3. FILM: Frame Interpolation Model (code re-use from spaces/akhaliq/frame-interpolation/tree/main)
19
  os.system("git clone https://github.com/google-research/frame-interpolation")
20
  sys.path.append("frame-interpolation")
21
  from eval import interpolator, util
@@ -36,7 +31,7 @@ def generate_story(choice, input_text):
36
 
37
  return generated_text
38
 
39
- def generate_images(generated_text):
40
  steps=50
41
  width=256
42
  height=256
@@ -55,15 +50,10 @@ def generate_images(generated_text):
55
 
56
  return generated_images
57
 
58
- def generate_interpolation(gallery):
59
  times_to_interpolate = 4
60
 
61
- generated_images = []
62
- for image_str in gallery:
63
- image_str = image_str.replace("data:image/png;base64,","")
64
- decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
65
- img = Image.open(io.BytesIO(decoded_bytes))
66
- generated_images.append(img)
67
 
68
  generated_images[0].save('frame_0.png')
69
  generated_images[1].save('frame_1.png')
@@ -83,47 +73,10 @@ def generate_interpolation(gallery):
83
  demo = gr.Blocks()
84
 
85
  with demo:
86
- with gr.Row():
87
-
88
- # Left column (inputs)
89
- with gr.Column():
90
- input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
91
- input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
92
-
93
- gr.Markdown("Be sure to run each of the buttons one at a time, they depend on each others' outputs!")
94
-
95
- # Rows of instructions & buttons
96
- with gr.Row():
97
- gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
98
- button_gen_story = gr.Button("Generate Story")
99
- with gr.Row():
100
- gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
101
- button_gen_images = gr.Button("Generate Images")
102
- with gr.Row():
103
- gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
104
- button_gen_video = gr.Button("Generate Video")
105
-
106
- # Rows of references
107
- with gr.Row():
108
- gr.Markdown("--Models Used--")
109
- with gr.Row():
110
- gr.Markdown("Story Generation: [GPT-J](https://huggingface.co/pranavpsv/gpt2-genre-story-generator)")
111
- with gr.Row():
112
- gr.Markdown("Image Generation Conditioned on Text: [Latent Diffusion](https://huggingface.co/spaces/multimodalart/latentdiffusion) | [Github Repo](https://github.com/CompVis/latent-diffusion)")
113
- with gr.Row():
114
- gr.Markdown("Interpolations: [FILM](https://huggingface.co/spaces/akhaliq/frame-interpolation) | [Github Repo](https://github.com/google-research/frame-interpolation)")
115
- with gr.Row():
116
- gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_story_and_video_generation)")
117
-
118
- # Right column (outputs)
119
- with gr.Column():
120
- output_generated_story = gr.Textbox(label="Generated Story")
121
- output_gallery = gr.Gallery(label="Generated Story Images")
122
- output_interpolation = gr.Video(label="Generated Video")
123
 
124
- # Bind functions to buttons
125
- button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story)
126
- button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
127
- button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
128
 
129
  demo.launch(debug=True, enable_queue=True)
 
9
  import sys
10
  from huggingface_hub import snapshot_download
11
 
 
 
 
 
12
  image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
13
 
 
14
  os.system("git clone https://github.com/google-research/frame-interpolation")
15
  sys.path.append("frame-interpolation")
16
  from eval import interpolator, util
 
31
 
32
  return generated_text
33
 
34
+ def generate_images(text):
35
  steps=50
36
  width=256
37
  height=256
 
50
 
51
  return generated_images
52
 
53
+ def generate_interpolation(text):
54
  times_to_interpolate = 4
55
 
56
+ generated_images = generate_images(text)
 
 
 
 
 
57
 
58
  generated_images[0].save('frame_0.png')
59
  generated_images[1].save('frame_1.png')
 
73
  demo = gr.Blocks()
74
 
75
  with demo:
76
+ input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
77
+ button_gen_video = gr.Button("Generate Video")
78
+ output_interpolation = gr.Video(label="Generated Video")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
+ button_gen_video.click(fn=generate_interpolation, inputs=input_start_text, outputs=output_interpolation)
 
 
 
81
 
82
  demo.launch(debug=True, enable_queue=True)