copied some webcam code
- README.md +1 -1
- app.py +53 -0
- requirements.txt +1 -0
- run.ipynb +1 -0
- run.py +16 -0
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: pink
 colorTo: blue
 sdk: gradio
 sdk_version: 5.12.0
-app_file:
+app_file: run.py
 pinned: false
 ---
 
app.py ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+
+def greet(name):
+    return "Hello " + name + "!!"
+
+demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+demo.launch()
+
+# import asyncio
+# from gradio_webrtc import AsyncAudioVideoStreamHandler
+
+# class GeminiHandler(AsyncAudioVideoStreamHandler):
+#     def __init__(
+#         self, expected_layout="mono", output_sample_rate=24000, output_frame_size=480
+#     ) -> None:
+#         super().__init__(
+#             expected_layout,
+#             output_sample_rate,
+#             output_frame_size,
+#             input_sample_rate=16000,
+#         )
+#         self.audio_queue = asyncio.Queue()
+#         self.video_queue = asyncio.Queue()
+#         self.quit = asyncio.Event()
+#         self.session = None
+#         self.last_frame_time = 0
+
+#     def copy(self) -> "GeminiHandler":
+#         """Copy gets called whenever a new user connects to the server.
+#         This ensures that each user has an independent handler.
+#         """
+#         return GeminiHandler(
+#             expected_layout=self.expected_layout,
+#             output_sample_rate=self.output_sample_rate,
+#             output_frame_size=self.output_frame_size,
+#         )
+
+
+#     async def video_receive(self, frame: np.ndarray):
+#         """Send video frames to the server"""
+#         if self.session:
+#             # send image every 1 second
+#             # otherwise we flood the API
+#             if time.time() - self.last_frame_time > 1:
+#                 self.last_frame_time = time.time()
+#                 await self.session.send(encode_image(frame))
+#                 if self.latest_args[2] is not None:
+#                     await self.session.send(encode_image(self.latest_args[2]))
+#         self.video_queue.put_nowait(frame)
+
+#     async def video_emit(self) -> VideoEmitType:
+#         """Return video frames to the client"""
+#         return await self.video_queue.get()
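The commented-out handler above references names that app.py never imports or defines (np, time, encode_image, VideoEmitType). As a hedged sketch only, not part of this commit, re-enabling that block would need roughly the following at the top of the file; where VideoEmitType and encode_image come from is an assumption based on the demo this code appears to be copied from:

# Sketch only, not part of this commit: extra imports the commented
# GeminiHandler block would need if it were ever un-commented.
import asyncio
import time

import numpy as np
from gradio_webrtc import AsyncAudioVideoStreamHandler, VideoEmitType  # VideoEmitType location is assumed

# encode_image(frame) would also have to be defined or imported;
# it is not provided anywhere in this commit.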
requirements.txt ADDED
@@ -0,0 +1 @@
+transformers
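As committed, requirements.txt lists only transformers, which nothing in app.py or run.py imports; gradio itself comes from the Space's sdk setting. If the commented gradio_webrtc handler in app.py were ever enabled, the file would plausibly need more, for example (a guess from the imports in that block, not confirmed by this commit):

# hypothetical requirements.txt if the webrtc handler were enabled
transformers
gradio_webrtc
numpy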
run.ipynb ADDED
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: webcam"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["\n", "import gradio as gr\n", "\n", "\n", "def snap(image, video):\n", " return [image, video]\n", "\n", "\n", "demo = gr.Interface(\n", " snap,\n", " [gr.Image(source=\"webcam\", tool=None), gr.Video(source=\"webcam\")],\n", " [\"image\", \"video\"],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py ADDED
@@ -0,0 +1,16 @@
+
+import gradio as gr
+
+
+def snap(image, video):
+    return [image, video]
+
+
+demo = gr.Interface(
+    snap,
+    [gr.Image(source="webcam", tool=None), gr.Video(source="webcam")],
+    ["image", "video"],
+)
+
+if __name__ == "__main__":
+    demo.launch()
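Note that run.py uses the Gradio 3.x webcam signature (source="webcam", tool=None) while the README now pins sdk_version: 5.12.0; in Gradio 4/5 the source argument became sources=[...] and the tool argument was removed. A minimal sketch of the same demo against the newer API, offered as a guess at intent rather than part of this commit:

# Sketch only, not part of this commit: the webcam demo adapted to the
# Gradio 4/5 component API (sources=[...] instead of source=, no tool arg).
import gradio as gr


def snap(image, video):
    # Echo back the captured image and video, as in the committed demo.
    return [image, video]


demo = gr.Interface(
    snap,
    [gr.Image(sources=["webcam"]), gr.Video(sources=["webcam"])],
    ["image", "video"],
)

if __name__ == "__main__":
    demo.launch()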