om-app committed on
Commit
d306302
·
1 Parent(s): 1874dee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -71
app.py CHANGED
@@ -1,73 +1,29 @@
1
  import gradio as gr
2
- import cv2
3
  import numpy as np
4
-
5
- def process_frame(frame, bg_image):
6
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
7
- hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
8
- lower_green = np.array([45, 100, 50])
9
- upper_green = np.array([75, 255, 255])
10
- mask = cv2.inRange(hsv, lower_green, upper_green)
11
- mask_inv = cv2.bitwise_not(mask)
12
- bg = cv2.imread(bg_image)
13
- bg = cv2.resize(bg, (frame.shape[1], frame.shape[0]))
14
- fg = cv2.bitwise_and(frame, frame, mask=mask_inv)
15
- bg = cv2.bitwise_and(bg, bg, mask=mask)
16
- result = cv2.add(bg, fg)
17
- return result
18
-
19
- def remove_green_screen(input_video, bg_image):
20
- cap = cv2.VideoCapture(input_video.name)
21
- codec = cv2.VideoWriter_fourcc(*"mp4v")
22
- fps = int(cap.get(cv2.CAP_PROP_FPS))
23
- frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
24
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
25
- out = cv2.VideoWriter("output.mp4", codec, fps, (frame_width, frame_height))
26
- while True:
27
- ret, frame = cap.read()
28
- if not ret:
29
- break
30
- result = process_frame(frame, bg_image)
31
- out.write(result)
32
- cap.release()
33
- out.release()
34
-
35
- def predict(input_video, bg_image):
36
- remove_green_screen(input_video, bg_image)
37
- return "output.mp4"
38
-
39
- inputs = [
40
- gr.inputs.Video(label="Input Video"),
41
- gr.inputs.Image(label="Background Image")
42
- ]
43
-
44
- outputs = gr.outputs.Video(label="Processed Video", type="auto")
45
-
46
- title = "Green Screen Remover"
47
- description = "Upload a video and an image to use as the background to remove the green screen."
48
- article = "<p style='text-align: center'><a href='https://github.com/gradio-app/examples/blob/master/green_screen_removal.py'>This code</a> was made into an interactive interface using Gradio. See the full tutorial at <a href='https://blog.gradio.app/green-screen-removal-with-opencv/'>this blog post</a> on the Gradio blog.</p>"
49
- examples = [
50
- [
51
- "https://www.youtube.com/watch?v=clD6_yXKo2I",
52
- "https://i.imgur.com/lxIhsG6.jpg"
53
- ],
54
- [
55
- "https://www.youtube.com/watch?v=6DfZ6UOZi0A",
56
- "https://i.imgur.com/6UaTvfo.jpg"
57
- ],
58
- ]
59
-
60
- iface = gr.Interface(
61
- fn=predict,
62
- inputs=inputs,
63
- outputs=outputs,
64
- title=title,
65
- description=description,
66
- article=article,
67
- examples=examples,
68
- analytics_enabled=False,
69
- server_port=8000
70
- )
71
-
72
- if __name__ == '__main__':
73
- iface.launch()
 
from io import BytesIO

import gradio as gr
import numpy as np
import requests
from PIL import Image

6
def enlarge_image(image, scale):
    """Enlarge *image* via a remote DALL-E 2 "enlarge" endpoint.

    Parameters
    ----------
    image :
        Input image from the Gradio ``Image`` component; assumed to expose
        ``tobytes()`` (both ``numpy.ndarray`` and ``PIL.Image.Image`` do).
    scale :
        Upscaling factor, forwarded as the ``size`` query parameter.

    Returns
    -------
    numpy.ndarray
        The enlarged image decoded from the HTTP response body.

    Raises
    ------
    requests.HTTPError
        If the remote service responds with an error status.
    """
    # Serialize the raw pixel data for the request body.
    img_byte_arr = image.tobytes()
    # NOTE(review): this URL is not a documented public API — confirm
    # endpoint and authentication before relying on it.
    response = requests.post("https://api.dall-e.com/v1/enlarge",
                             data=img_byte_arr,
                             params={"size": scale})
    # Fail loudly on HTTP errors rather than trying to decode an error
    # page as image bytes.
    response.raise_for_status()
    image_bytes = response.content
    # Fix: BytesIO was referenced without ever being imported in the new
    # file (NameError at runtime); it must come from the stdlib `io` module.
    img_np = np.array(Image.open(BytesIO(image_bytes)))
    return img_np
18
# --- Gradio UI wiring -------------------------------------------------
# Two inputs (image + numeric scale) feed enlarge_image; one image output.
input_image = gr.inputs.Image(label="Input Image")
scale = gr.inputs.Number(label="Scale", default=2)
output_image = gr.outputs.Image(label="Enlarged Image")

# User-facing page copy and bundled example.
title = "DALL-E 2 Image Enlarger"
description = "Enlarges an image using DALL-E 2. Enter a scale factor and upload an image."
examples = [["examples/butterfly.jpg"]]

gr.Interface(
    fn=enlarge_image,
    inputs=[input_image, scale],
    outputs=output_image,
    title=title,
    description=description,
    examples=examples,
).launch()