Update app.py
app.py CHANGED
@@ -1,73 +1,29 @@
 import gradio as gr
-import
+import requests
 import numpy as np
-
-
-
-
-
-
-
-
-
-
-
-
-
-    return
-
-
-
-
-
-
-
-
-
-
-
-
-    result = process_frame(frame, bg_image)
-    out.write(result)
-    cap.release()
-    out.release()
-
-def predict(input_video, bg_image):
-    remove_green_screen(input_video, bg_image)
-    return "output.mp4"
-
-inputs = [
-    gr.inputs.Video(label="Input Video"),
-    gr.inputs.Image(label="Background Image")
-]
-
-outputs = gr.outputs.Video(label="Processed Video", type="auto")
-
-title = "Green Screen Remover"
-description = "Upload a video and an image to use as the background to remove the green screen."
-article = "<p style='text-align: center'><a href='https://github.com/gradio-app/examples/blob/master/green_screen_removal.py'>This code</a> was made into an interactive interface using Gradio. See the full tutorial at <a href='https://blog.gradio.app/green-screen-removal-with-opencv/'>this blog post</a> on the Gradio blog.</p>"
-examples = [
-    [
-        "https://www.youtube.com/watch?v=clD6_yXKo2I",
-        "https://i.imgur.com/lxIhsG6.jpg"
-    ],
-    [
-        "https://www.youtube.com/watch?v=6DfZ6UOZi0A",
-        "https://i.imgur.com/6UaTvfo.jpg"
-    ],
-]
-
-iface = gr.Interface(
-    fn=predict,
-    inputs=inputs,
-    outputs=outputs,
-    title=title,
-    description=description,
-    article=article,
-    examples=examples,
-    analytics_enabled=False,
-    server_port=8000
-)
-
-if __name__ == '__main__':
-    iface.launch()
+from PIL import Image
+
+def enlarge_image(image, scale):
+    # convert image to bytes
+    img_byte_arr = image.tobytes()
+    # make request to DALL-E 2 API
+    response = requests.post("https://api.dall-e.com/v1/enlarge",
+                             data=img_byte_arr,
+                             params={"size": scale})
+    # get the image bytes from the response
+    image_bytes = response.content
+    # convert bytes to numpy array
+    img_np = np.array(Image.open(BytesIO(image_bytes)))
+    return img_np
+
+# create the Gradio interface
+input_image = gr.inputs.Image(label="Input Image")
+scale = gr.inputs.Number(label="Scale", default=2)
+output_image = gr.outputs.Image(label="Enlarged Image")
+
+title = "DALL-E 2 Image Enlarger"
+description = "Enlarges an image using DALL-E 2. Enter a scale factor and upload an image."
+examples = [["examples/butterfly.jpg"]]
+
+gr.Interface(fn=enlarge_image, inputs=[input_image, scale], outputs=output_image,
+             title=title, description=description, examples=examples).launch()
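Note on the removed file: beyond the capture and writer calls, the body of the green-screen routine is not shown above. Purely as a hypothetical sketch of the HSV chroma-key step that a process_frame(frame, bg_image) helper of this kind typically performs (OpenCV and the green HSV range below are assumptions, not taken from the removed code):

import cv2
import numpy as np

def process_frame(frame, bg_image):
    # hypothetical reconstruction; not the removed implementation
    # resize the replacement background to match the frame
    bg = cv2.resize(bg_image, (frame.shape[1], frame.shape[0]))
    # mask pixels that fall inside a green range in HSV space
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (35, 40, 40), (85, 255, 255))
    # keep the foreground where the mask is off, the background where it is on
    fg = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(mask))
    bg = cv2.bitwise_and(bg, bg, mask=mask)
    return cv2.add(fg, bg)

Channel-order conversion between Gradio's RGB arrays and OpenCV's BGR frames is omitted for brevity.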
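Note on the added file: as committed, app.py calls BytesIO without importing it, so enlarge_image would raise a NameError on first use, and https://api.dall-e.com/v1/enlarge does not appear to be a documented public endpoint. A minimal runnable variant is sketched below under those caveats; the from io import BytesIO line and a scale value in the examples row are the only functional changes, and the endpoint URL is kept verbatim from the commit, so it would still need to point at a real upscaling service.

import gradio as gr
import requests
import numpy as np
from io import BytesIO  # missing from the committed file; Image.open(BytesIO(...)) needs it
from PIL import Image

def enlarge_image(image, scale):
    # gr.inputs.Image passes the function a numpy array; send its raw bytes
    img_byte_arr = image.tobytes()
    # endpoint kept verbatim from the commit; not a documented public API
    response = requests.post("https://api.dall-e.com/v1/enlarge",
                             data=img_byte_arr,
                             params={"size": scale})
    # decode the returned bytes into a numpy array for Gradio to display
    return np.array(Image.open(BytesIO(response.content)))

# gr.inputs / gr.outputs is the older Gradio namespace used by this Space;
# current Gradio releases use gr.Image, gr.Number, etc. instead
input_image = gr.inputs.Image(label="Input Image")
scale = gr.inputs.Number(label="Scale", default=2)
output_image = gr.outputs.Image(label="Enlarged Image")

title = "DALL-E 2 Image Enlarger"
description = "Enlarges an image using DALL-E 2. Enter a scale factor and upload an image."
# each example row needs one value per input, so a scale of 2 is added here
examples = [["examples/butterfly.jpg", 2]]

gr.Interface(fn=enlarge_image, inputs=[input_image, scale], outputs=output_image,
             title=title, description=description, examples=examples).launch()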