Dataset columns (as summarized by the dataset viewer):

| Column | Type | Observed values / lengths |
|---|---|---|
| repo | stringclasses | 856 values |
| pull_number | int64 | 3 to 127k |
| instance_id | stringlengths | 12 to 58 |
| issue_numbers | sequencelengths | 1 to 5 |
| base_commit | stringlengths | 40 to 40 |
| patch | stringlengths | 67 to 1.54M |
| test_patch | stringlengths | 0 to 107M |
| problem_statement | stringlengths | 3 to 307k |
| hints_text | stringlengths | 0 to 908k |
| created_at | timestamp[s] | |

Each row below lists its cells in this column order, separated by `|`.
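A minimal loading sketch for a dataset with this schema; the Hub id is not shown on this page, so `"org/dataset-name"` below is a placeholder rather than the real identifier.

```python
# Hypothetical loading sketch: replace "org/dataset-name" with the dataset's
# actual Hub id, which is not given here.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["repo"], row["pull_number"], row["instance_id"], row["created_at"])
print(row["problem_statement"][:200])  # long text fields are plain strings
```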
gradio-app/gradio | 66 | gradio-app__gradio-66 | [
"64"
] | 6942d2e44db1eba136f8f2d5103f0c0cf17e0289 | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -3,8 +3,6 @@
interface using the input and output types.
"""
-import tempfile
-import webbrowser
from gradio.inputs import InputComponent
from gradio.outputs import OutputComponent
from gradio import networking, strings, utils
@@ -12,8 +10,8 @@
import requests
import random
import time
+import webbrowser
import inspect
-from IPython import get_ipython
import sys
import weakref
import analytics
@@ -209,21 +207,14 @@ def run_prediction(self, processed_input, return_duration=False):
start = time.time()
if self.capture_session and self.session is not None:
graph, sess = self.session
- with graph.as_default():
- with sess.as_default():
- prediction = predict_fn(*processed_input)
+ with graph.as_default(), sess.as_default():
+ prediction = predict_fn(*processed_input)
else:
try:
prediction = predict_fn(*processed_input)
except ValueError as exception:
- if str(exception).endswith("is not an element of this "
- "graph."):
- raise ValueError("It looks like you might be using "
- "tensorflow < 2.0. Please "
- "pass capture_session=True in "
- "Interface to avoid the 'Tensor is "
- "not an element of this graph.' "
- "error.")
+ if str(exception).endswith("is not an element of this graph."):
+ raise ValueError(strings.en["TF1_ERROR"])
else:
raise exception
duration = time.time() - start
@@ -238,17 +229,11 @@ def run_prediction(self, processed_input, return_duration=False):
else:
return predictions
-
- def process(self, raw_input, predict_fn=None):
+ def process(self, raw_input):
"""
- :param raw_input: a list of raw inputs to process and apply the
- prediction(s) on.
- :param predict_fn: which function to process. If not provided, all of the model functions are used.
- :return:
- processed output: a list of processed outputs to return as the
- prediction(s).
- duration: a list of time deltas measuring inference time for each
- prediction fn.
+ :param raw_input: a list of raw inputs to process and apply the prediction(s) on.
+ processed output: a list of processed outputs to return as the prediction(s).
+ duration: a list of time deltas measuring inference time for each prediction fn.
"""
processed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(self.input_interfaces)]
@@ -258,6 +243,11 @@ def process(self, raw_input, predict_fn=None):
return processed_output, durations
def interpret(self, raw_input):
+ """
+ Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box
+ interpretation for a certain set of UI component types, as well as the custom interpretation case.
+ :param raw_input: a list of raw inputs to apply the interpretation(s) on.
+ """
if self.interpretation == "default":
interpreter = gradio.interpretation.default()
processed_input = []
@@ -270,9 +260,22 @@ def interpret(self, raw_input):
interpretation = interpreter(self, processed_input)
else:
processed_input = [input_interface.preprocess(raw_input[i])
- for i, input_interface in enumerate(self.input_interfaces)]
+ for i, input_interface in enumerate(self.input_interfaces)]
interpreter = self.interpretation
- interpretation = interpreter(*processed_input)
+
+ if self.capture_session and self.session is not None:
+ graph, sess = self.session
+ with graph.as_default(), sess.as_default():
+ interpretation = interpreter(*processed_input)
+ else:
+ try:
+ interpretation = interpreter(*processed_input)
+ except ValueError as exception:
+ if str(exception).endswith("is not an element of this graph."):
+ raise ValueError(strings.en["TF1_ERROR"])
+ else:
+ raise exception
+
if len(raw_input) == 1:
interpretation = [interpretation]
return interpretation
@@ -422,9 +425,9 @@ def launch(self, inline=None, inbrowser=None, share=False, debug=False):
if not is_in_interactive_mode:
self.run_until_interrupted(thread, path_to_local_server)
-
return app, path_to_local_server, share_url
+
def reset_all():
for io in Interface.get_instances():
io.close()
diff --git a/gradio/interpretation.py b/gradio/interpretation.py
--- a/gradio/interpretation.py
+++ b/gradio/interpretation.py
@@ -1,6 +1,5 @@
from gradio.inputs import Image, Textbox
from gradio.outputs import Label
-from gradio import processing_utils
from skimage.segmentation import slic
import numpy as np
@@ -8,6 +7,7 @@
Image: "numpy",
}
+
def default(separator=" ", n_segments=20):
"""
Basic "default" interpretation method that uses "leave-one-out" to explain predictions for
diff --git a/gradio/strings.py b/gradio/strings.py
--- a/gradio/strings.py
+++ b/gradio/strings.py
@@ -7,4 +7,6 @@
"PUBLIC_SHARE_TRUE": "To create a public link, set `share=True` in the argument to `launch()`.",
"MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {} (may take up to a minute for link to be usable)",
"GENERATING_PUBLIC_LINK": "Generating public link (may take a few seconds...):",
+ "TF1_ERROR": "It looks like you might be using tensorflow < 2.0. Please pass capture_session=True in Interface() to"
+ " avoid the 'Tensor is not an element of this graph.' error."
}
| diff --git a/test/test_interpretation.py b/test/test_interpretation.py
new file mode 100644
--- /dev/null
+++ b/test/test_interpretation.py
@@ -0,0 +1,47 @@
+import unittest
+import gradio.interpretation
+import gradio.test_data
+from gradio.processing_utils import decode_base64_to_image
+from gradio import Interface
+import numpy as np
+
+
+class TestDefault(unittest.TestCase):
+ def setUp(self):
+ self.default_method = gradio.interpretation.default()
+
+ def test_default_text(self):
+ max_word_len = lambda text: max([len(word) for word in text.split(" ")])
+ text_interface = Interface(max_word_len, "textbox", "label")
+ interpretation = self.default_method(text_interface, ["quickest brown fox"])[0]
+ self.assertGreater(interpretation[0][1], 0) # Checks to see if the first letter has >0 score.
+ self.assertEqual(interpretation[-1][1], 0) # Checks to see if the last letter has 0 score.
+
+ def test_default_image(self):
+ max_pixel_value = lambda img: img.max()
+ img_interface = Interface(max_pixel_value, "image", "label")
+ array = np.zeros((100,100))
+ array[0, 0] = 1
+ interpretation = self.default_method(img_interface, [array])[0]
+ self.assertGreater(interpretation[0][0], 0) # Checks to see if the top-left has >0 score.
+
+
+class TestCustom(unittest.TestCase):
+ def test_custom_text(self):
+ max_word_len = lambda text: max([len(word) for word in text.split(" ")])
+ custom = lambda text: [(char, 1) for char in text]
+ text_interface = Interface(max_word_len, "textbox", "label", interpretation=custom)
+ result = text_interface.interpret(["quickest brown fox"])[0]
+ self.assertEqual(result[0][1], 1) # Checks to see if the first letter has score of 1.
+
+ def test_custom_img(self):
+ max_pixel_value = lambda img: img.max()
+ custom = lambda img: img.tolist()
+ img_interface = Interface(max_pixel_value, "image", "label", interpretation=custom)
+ result = img_interface.interpret([gradio.test_data.BASE64_IMAGE])[0]
+ expected_result = np.asarray(decode_base64_to_image(gradio.test_data.BASE64_IMAGE).convert('RGB')).tolist()
+ self.assertEqual(result, expected_result)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Interpretation doesn't work in Tensorflow 1.x
For TF 1.x models, interpretation doesn't set up the correct session
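A minimal sketch of the idea behind the fix, assuming a TF 1.x `(graph, session)` pair was captured when the Interface was created; this is an illustration, not the library's exact code.

```python
# Re-enter the captured TF1 graph/session for interpretation calls, the same
# way run_prediction already does for predictions.
def run_with_captured_session(fn, session_pair, *inputs):
    if session_pair is not None:
        graph, sess = session_pair  # tf.Graph and tf.Session from TF 1.x
        with graph.as_default(), sess.as_default():
            return fn(*inputs)
    return fn(*inputs)  # TF 2.x / non-TF models need no special handling
```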
| 2020-10-05T12:44:53 |
|
gradio-app/gradio | 365 | gradio-app__gradio-365 | [
"343"
] | f361f36625d8c2aab5713ce9f073feb741284003 | diff --git a/gradio/mix.py b/gradio/mix.py
--- a/gradio/mix.py
+++ b/gradio/mix.py
@@ -45,18 +45,20 @@ def __init__(self, *interfaces, **options):
def connected_fn(*data): # Run each function with the appropriate preprocessing and postprocessing
for idx, io in enumerate(interfaces):
- # skip preprocessing for first interface since the compound interface will include it
- if idx > 0:
- data = [input_interface.preprocess(data[i]) for i, input_interface in enumerate(io.input_components)]
+ # skip preprocessing for first interface since the Series interface will include it
+ if idx > 0 and not(io.api_mode):
+ data = [input_component.preprocess(data[i]) for i, input_component in enumerate(io.input_components)]
+
# run all of predictions sequentially
predictions = []
for predict_fn in io.predict:
prediction = predict_fn(*data)
predictions.append(prediction)
data = predictions
- # skip postprocessing for final interface since the compound interface will include it
- if idx < len(interfaces) - 1:
- data = [output_interface.postprocess(data[i]) for i, output_interface in enumerate(io.output_components)]
+ # skip postprocessing for final interface since the Series interface will include it
+ if idx < len(interfaces) - 1 and not(io.api_mode):
+ data = [output_component.postprocess(data[i]) for i, output_component in enumerate(io.output_components)]
+
return data[0]
connected_fn.__name__ = " => ".join([f[0].__name__ for f in fns])
@@ -65,6 +67,7 @@ def connected_fn(*data): # Run each function with the appropriate preprocessing
"fn": connected_fn,
"inputs": interfaces[0].input_components,
"outputs": interfaces[-1].output_components,
+ "api_mode": interfaces[0].api_mode, # TODO(abidlabs): allow mixing api_mode and non-api_mode interfaces
}
kwargs.update(options)
super().__init__(**kwargs)
| diff --git a/test/test_mix.py b/test/test_mix.py
--- a/test/test_mix.py
+++ b/test/test_mix.py
@@ -4,6 +4,11 @@
import os
+"""
+WARNING: Some of these tests have an external dependency: namely that Hugging Face's Hub and Space APIs do not change, and they keep their most famous models up. So if, e.g. Spaces is down, then these test will not pass.
+"""
+
+
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -15,6 +20,14 @@ def test_in_interface(self):
series = mix.Series(io1, io2)
self.assertEqual(series.process(["Hello"])[0], ["Hello World!"])
+ def test_with_external(self):
+ io1 = gr.Interface.load("spaces/abidlabs/image-identity")
+ io2 = gr.Interface.load("spaces/abidlabs/image-classifier")
+ series = mix.Series(io1, io2)
+ output = series("test/test_data/lion.jpg")
+ self.assertGreater(output['lion'], 0.5)
+
+
class TestParallel(unittest.TestCase):
def test_in_interface(self):
io1 = gr.Interface(lambda x: x + " World 1!", "textbox",
@@ -24,6 +37,13 @@ def test_in_interface(self):
parallel = mix.Parallel(io1, io2)
self.assertEqual(parallel.process(["Hello"])[0], ["Hello World 1!",
"Hello World 2!"])
+ def test_with_external(self):
+ io1 = gr.Interface.load("spaces/abidlabs/english_to_spanish")
+ io2 = gr.Interface.load("spaces/abidlabs/english2german")
+ parallel = mix.Parallel(io1, io2)
+ hello_es, hello_de = parallel("Hello")
+ self.assertIn("hola", hello_es.lower())
+ self.assertIn("hallo", hello_de.lower())
if __name__ == '__main__':
| Series and Parallel don't work if prediction contains an ndarray when using Spaces
I was trying to recreate the example from this tweet:
https://twitter.com/abidlabs/status/1457753971075002376
```
import gradio as gr
general_classifier = gr.Interface.load("spaces/abidlabs/vision-transformer")
bird_classifier = gr.Interface.load("spaces/akhaliq/bird_classifier")
gr.Parallel(general_classifier, bird_classifier).launch(debug=True)
```
But this fails with the following error:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/gradio/networking.py", line 193, in predict
prediction, durations = app.interface.process(raw_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 364, in process
processed_input, return_duration=True)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 332, in run_prediction
prediction = predict_fn(*processed_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/external.py", line 203, in fn
data = json.dumps({"data": data})
File "/usr/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type ndarray is not JSON serializable
```

| You’re right! We’re working on patching Parallel() as soon as possible
Should work now!
Thanks for the quick fix! I can confirm that it now works for Parallel, but on gradio 2.4.5 Series is still broken. For example:
```
import gradio as gr
remove_bg = gr.Interface.load("spaces/eugenesiow/remove-bg")
anime = gr.Interface.load("spaces/akhaliq/AnimeGANv2")
gr.Series(remove_bg, anime).launch()
```
fails with:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/gradio/networking.py", line 195, in predict
prediction, durations = app.interface.process(raw_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 372, in process
processed_input, return_duration=True)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 337, in run_prediction
prediction = predict_fn(*processed_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/mix.py", line 54, in connected_fn
prediction = predict_fn(*data)
File "/usr/local/lib/python3.7/dist-packages/gradio/external.py", line 203, in fn
data = json.dumps({"data": data})
File "/usr/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type ndarray is not JSON serializable
```

Ah you're right, thanks will fix this issue | 2021-11-15T14:42:30 |
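A sketch of the usage the fix targets, mirroring the new test: interfaces loaded from Spaces run in `api_mode`, so `Series` has to skip the extra pre/post-processing between them. The Space names come from the test suite and may not stay available.

```python
import gradio as gr
from gradio import mix

# Both interfaces are backed by remote Spaces, so their api_mode flag is set.
io1 = gr.Interface.load("spaces/abidlabs/image-identity")
io2 = gr.Interface.load("spaces/abidlabs/image-classifier")

series = mix.Series(io1, io2)
print(series("test/test_data/lion.jpg"))  # e.g. a label dict such as {'lion': 0.9, ...}
```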
gradio-app/gradio | 420 | gradio-app__gradio-420 | [
"419"
] | eb7194f7563def402d5efa6c3afff781340065a5 | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -30,7 +30,7 @@ class Interface:
Interfaces are created with Gradio by constructing a `gradio.Interface()` object or by calling `gradio.Interface.load()`.
"""
- instances = weakref.WeakSet() # stores all currently existing Interface instances
+ instances = weakref.WeakSet() # stores references to all currently existing Interface instances
@classmethod
def get_instances(cls):
@@ -76,9 +76,8 @@ def __init__(self, fn, inputs=None, outputs=None, verbose=None, examples=None,
examples_per_page=10, live=False, layout="unaligned", show_input=True, show_output=True,
capture_session=None, interpretation=None, num_shap=2.0, theme=None, repeat_outputs_per_model=True,
title=None, description=None, article=None, thumbnail=None,
- css=None, server_port=None, server_name=None, height=500, width=900,
- allow_screenshot=True, allow_flagging=None, flagging_options=None, encrypt=False,
- show_tips=None, flagging_dir="flagged", analytics_enabled=None, enable_queue=None, api_mode=None):
+ css=None, height=500, width=900, allow_screenshot=True, allow_flagging=None, flagging_options=None,
+ encrypt=False, show_tips=None, flagging_dir="flagged", analytics_enabled=None, enable_queue=None, api_mode=None):
"""
Parameters:
fn (Callable): the function to wrap an interface around.
@@ -175,12 +174,7 @@ def __init__(self, fn, inputs=None, outputs=None, verbose=None, examples=None,
"Examples argument must either be a directory or a nested list, where each sublist represents a set of inputs.")
self.num_shap = num_shap
self.examples_per_page = examples_per_page
-
- self.server_name = server_name
- self.server_port = server_port
- if server_name is not None or server_port is not None:
- warnings.warn("The server_name and server_port parameters in the `Interface` class will be deprecated. Please provide them in the `launch()` method instead.")
-
+
self.simple_server = None
self.allow_screenshot = allow_screenshot
# For allow_flagging and analytics_enabled: (1) first check for parameter, (2) check for environment variable, (3) default to True
@@ -586,7 +580,6 @@ def launch(self, inline=None, inbrowser=None, share=False, debug=False,
path_to_local_server (str): Locally accessible link
share_url (str): Publicly accessible link (if share=True)
"""
-
# Set up local flask server
config = self.get_config_file()
self.config = config
@@ -595,6 +588,7 @@ def launch(self, inline=None, inbrowser=None, share=False, debug=False,
self.auth = auth
self.auth_message = auth_message
self.show_tips = show_tips
+ self.show_error = show_error
# Request key for encryption
if self.encrypt:
@@ -602,19 +596,18 @@ def launch(self, inline=None, inbrowser=None, share=False, debug=False,
getpass.getpass("Enter key for encryption: "))
# Store parameters
- server_name = server_name or self.server_name or networking.LOCALHOST_NAME
- server_port = server_port or self.server_port or networking.INITIAL_PORT_VALUE
if self.enable_queue is None:
self.enable_queue = enable_queue
# Launch local flask server
- server_port, path_to_local_server, app, thread = networking.start_server(
+ server_port, path_to_local_server, app, thread, server = networking.start_server(
self, server_name, server_port, self.auth)
self.local_url = path_to_local_server
self.server_port = server_port
self.status = "RUNNING"
- self.server = app
- self.show_error = show_error
+ self.server = server
+ self.server_app = app
+ self.server_thread = thread
# Count number of launches
utils.launch_counter()
@@ -709,21 +702,18 @@ def launch(self, inline=None, inbrowser=None, share=False, debug=False,
return app, path_to_local_server, share_url
- def close(self):
+ def close(self, verbose=True):
"""
Closes the Interface that was launched. This will close the server and free the port.
"""
try:
- if self.share_url:
- requests.get("{}/shutdown".format(self.share_url))
- print("Closing Gradio server on port {}...".format(self.server_port))
- elif self.local_url:
- requests.get("{}shutdown".format(self.local_url))
- print("Closing Gradio server on port {}...".format(self.server_port))
- else:
- pass # server not running
- except (requests.ConnectionError, ConnectionResetError):
- pass # server is already closed
+ self.server.shutdown()
+ self.server_thread.join()
+ print("Closing server running on port: {}".format(self.server_port))
+ except AttributeError: # can't close if not running
+ pass
+ except OSError: # sometimes OSError is thrown when shutting down
+ pass
def integrate(self, comet_ml=None, wandb=None, mlflow=None):
"""
@@ -764,11 +754,13 @@ def integrate(self, comet_ml=None, wandb=None, mlflow=None):
utils.integration_analytics(data)
-def close_all():
+def close_all(verbose=True):
+ # Tries to close all running interfaces, but method is a little flaky.
for io in Interface.get_instances():
- io.close()
+ io.close(verbose)
def reset_all():
- warnings.warn("The `reset_all()` method has been renamed to `close_all()`. Please use `close_all()` instead.")
+ warnings.warn("The `reset_all()` method has been renamed to `close_all()` "
+ "and will be deprecated. Please use `close_all()` instead.")
close_all()
diff --git a/gradio/networking.py b/gradio/networking.py
--- a/gradio/networking.py
+++ b/gradio/networking.py
@@ -2,45 +2,44 @@
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
-import os
-import socket
-import threading
+import csv
+import datetime
from flask import Flask, request, session, jsonify, abort, send_file, render_template, redirect
from flask_cachebuster import CacheBuster
from flask_login import LoginManager, login_user, current_user, login_required
from flask_cors import CORS
-import threading
-import pkg_resources
-import datetime
-import time
+from functools import wraps
+import inspect
+import io
import json
-import urllib.request
-from shutil import copyfile
+import logging
+import os
+import pkg_resources
import requests
+import socket
import sys
-import csv
-import logging
-from gradio.tunneling import create_tunnel
-from gradio import encryptor
-from gradio import queue
-from functools import wraps
-import io
-import inspect
+import threading
+import time
import traceback
+import urllib.request
from werkzeug.security import safe_join
+from werkzeug.serving import make_server
+from gradio import encryptor
+from gradio import queue
+from gradio.tunneling import create_tunnel
-INITIAL_PORT_VALUE = int(os.getenv(
- 'GRADIO_SERVER_PORT', "7860")) # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
-TRY_NUM_PORTS = int(os.getenv(
- 'GRADIO_NUM_PORTS', "100")) # Number of ports to try before giving up and throwing an exception.
-LOCALHOST_NAME = os.getenv(
- 'GRADIO_SERVER_NAME', "127.0.0.1")
+# By default, the http server will try to open on port 7860. If not available, 7861, 7862, etc.
+INITIAL_PORT_VALUE = int(os.getenv('GRADIO_SERVER_PORT', "7860"))
+# Number of ports to try before giving up and throwing an exception.
+TRY_NUM_PORTS = int(os.getenv('GRADIO_NUM_PORTS', "100"))
+LOCALHOST_NAME = os.getenv('GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "templates/frontend/static")
VERSION_FILE = pkg_resources.resource_filename("gradio", "version.txt")
+
with open(VERSION_FILE) as version_file:
GRADIO_STATIC_ROOT = "https://gradio.s3-us-west-2.amazonaws.com/" + \
version_file.read().strip() + "/static/"
@@ -426,10 +425,22 @@ def queue_thread(path_to_local_server, test_mode=False):
break
-def start_server(interface, server_name, server_port, auth=None, ssl=None):
- port = get_first_available_port(
- server_port, server_port + TRY_NUM_PORTS
- )
+def start_server(interface, server_name=None, server_port=None, auth=None, ssl=None):
+ if server_name is None:
+ server_name = LOCALHOST_NAME
+ if server_port is None: # if port is not specified, start at 7860 and search for first available port
+ port = get_first_available_port(
+ INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
+ )
+ else:
+ try:
+ s = socket.socket() # create a socket object
+ s.bind((LOCALHOST_NAME, server_port)) # Bind to the port to see if it's available (otherwise, raise OSError)
+ s.close()
+ except OSError:
+ raise OSError("Port {} is in use. If a gradio.Interface is running on the port, you can close() it or gradio.close_all().".format(server_port))
+ port = server_port
+
url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
path_to_local_server = "http://{}:{}/".format(url_host_name, port)
if auth is not None:
@@ -451,15 +462,13 @@ def start_server(interface, server_name, server_port, auth=None, ssl=None):
app.queue_thread.start()
if interface.save_to is not None:
interface.save_to["port"] = port
- app_kwargs = {"port": port, "host": server_name}
+ app_kwargs = {"app": app, "port": port, "host": server_name}
if ssl:
app_kwargs["ssl_context"] = ssl
- thread = threading.Thread(target=app.run,
- kwargs=app_kwargs,
- daemon=True)
+ server = make_server(**app_kwargs)
+ thread = threading.Thread(target=server.serve_forever, daemon=True)
thread.start()
-
- return port, path_to_local_server, app, thread
+ return port, path_to_local_server, app, thread, server
def get_state():
return session.get("state")
| diff --git a/test/test_interfaces.py b/test/test_interfaces.py
--- a/test/test_interfaces.py
+++ b/test/test_interfaces.py
@@ -10,6 +10,7 @@
from comet_ml import Experiment
import mlflow
import wandb
+import socket
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -24,10 +25,19 @@ def captured_output():
sys.stdout, sys.stderr = old_out, old_err
class TestInterface(unittest.TestCase):
- def test_reset_all(self):
+ def test_close(self):
+ io = Interface(lambda input: None, "textbox", "label")
+ _, local_url, _ = io.launch(prevent_thread_lock=True)
+ response = requests.get(local_url)
+ self.assertEqual(response.status_code, 200)
+ io.close()
+ with self.assertRaises(Exception):
+ response = requests.get(local_url)
+
+ def test_close_all(self):
interface = Interface(lambda input: None, "textbox", "label")
interface.close = mock.MagicMock()
- reset_all()
+ close_all()
interface.close.assert_called()
def test_examples_invalid_input(self):
diff --git a/test/test_networking.py b/test/test_networking.py
--- a/test/test_networking.py
+++ b/test/test_networking.py
@@ -199,7 +199,6 @@ def test_state_initialization(self):
def test_state_value(self):
io = gr.Interface(lambda x: len(x), "text", "label")
- io.launch(prevent_thread_lock=True)
app, _, _ = io.launch(prevent_thread_lock=True)
with app.test_request_context():
networking.set_state("test")
diff --git a/test/test_utils.py b/test/test_utils.py
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -101,20 +101,8 @@ def test_readme_to_html_doesnt_crash_on_connection_error(self, mock_get):
readme_to_html("placeholder")
def test_readme_to_html_correct_parse(self):
- readme_to_html("https://github.com/gradio-app/gradio/blob/master/README.md")
-
- def test_launch_counter(self):
- with tempfile.NamedTemporaryFile() as tmp:
- with mock.patch('gradio.utils.JSON_PATH', tmp.name):
- interface = gradio.Interface(lambda x: x, "textbox", "label")
- os.remove(tmp.name)
- interface.launch(prevent_thread_lock=True)
- with open(tmp.name) as j:
- self.assertEqual(json.load(j)['launches'], 1)
- interface.launch(prevent_thread_lock=True)
- with open(tmp.name) as j:
- self.assertEqual(json.load(j)['launches'], 2)
-
+ readme_to_html("https://github.com/gradio-app/gradio/blob/master/README.md")
+
if __name__ == '__main__':
unittest.main()
| [Suggestions] Don't autofind a port when a port is user specified
**Is your feature request related to a problem? Please describe.**
When using `iface = gr.Interface(.., server_port=5000)`, if port `5000` is already in use it will silently use port `5001` instead.
**Describe the solution you'd like**
It would be better if it crashed instead, with an error saying the port is already in use.
**Additional context**
This was an issue when regularly reloading the gradio app and refreshing pages in the browser
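A short sketch of the requested behavior (which the accompanying patch implements in `networking.start_server`): probe the user-specified port and fail loudly instead of silently falling back to the next free one. Names below are illustrative.

```python
import socket

def ensure_port_is_free(host: str, port: int) -> int:
    """Raise instead of auto-incrementing when an explicit port is taken."""
    try:
        s = socket.socket()
        s.bind((host, port))  # raises OSError if something else owns the port
        s.close()
    except OSError:
        raise OSError(
            f"Port {port} is in use. Close the other process (or the running "
            "gradio Interface) or pick a different port."
        )
    return port
```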
| Makes sense, we'll fix it! | 2021-12-17T16:44:31 |
gradio-app/gradio | 472 | gradio-app__gradio-472 | [
"447"
] | c1a23a75a4d679fd79e255955e453c0474c91ecb | diff --git a/gradio/app.py b/gradio/app.py
--- a/gradio/app.py
+++ b/gradio/app.py
@@ -2,6 +2,7 @@
from __future__ import annotations
from fastapi import FastAPI, Request, Depends, HTTPException, status
+from fastapi.concurrency import run_in_threadpool
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, HTMLResponse, FileResponse
from fastapi.security import OAuth2PasswordRequestForm
@@ -37,6 +38,7 @@
allow_headers=["*"],
)
+
templates = Jinja2Templates(directory=STATIC_TEMPLATE_LIB)
@@ -178,23 +180,28 @@ async def predict(
if body.get("example_id") != None:
example_id = body["example_id"]
if app.interface.cache_examples:
- prediction = load_from_cache(app.interface, example_id)
+ prediction = await run_in_threadpool(
+ load_from_cache, app.interface, example_id)
durations = None
else:
- prediction, durations = process_example(app.interface, example_id)
+ prediction, durations = await run_in_threadpool(
+ process_example, app.interface, example_id)
else:
raw_input = body["data"]
if app.interface.show_error:
try:
- prediction, durations = app.interface.process(raw_input)
+ prediction, durations = await run_in_threadpool(
+ app.interface.process, raw_input)
except BaseException as error:
traceback.print_exc()
return JSONResponse(content={"error": str(error)},
status_code=500)
else:
- prediction, durations = app.interface.process(raw_input)
+ prediction, durations = await run_in_threadpool(
+ app.interface.process, raw_input)
if app.interface.allow_flagging == "auto":
- flag_index = app.interface.flagging_callback.flag(
+ flag_index = await run_in_threadpool(
+ app.interface.flagging_callback.flag,
app.interface, raw_input, prediction,
flag_option="" if app.interface.flagging_options else None,
username=username)
@@ -216,7 +223,8 @@ async def flag(
await utils.log_feature_analytics(app.interface.ip_address, 'flag')
body = await request.json()
data = body['data']
- app.interface.flagging_callback.flag(
+ await run_in_threadpool(
+ app.interface.flagging_callback.flag,
app.interface, data['input_data'], data['output_data'],
flag_option=data.get("flag_option"), flag_index=data.get("flag_index"),
username=username)
@@ -229,8 +237,8 @@ async def interpret(request: Request):
await utils.log_feature_analytics(app.interface.ip_address, 'interpret')
body = await request.json()
raw_input = body["data"]
- interpretation_scores, alternative_outputs = app.interface.interpret(
- raw_input)
+ interpretation_scores, alternative_outputs = await run_in_threadpool(
+ app.interface.interpret, raw_input)
return {
"interpretation_scores": interpretation_scores,
"alternative_outputs": alternative_outputs
diff --git a/gradio/networking.py b/gradio/networking.py
--- a/gradio/networking.py
+++ b/gradio/networking.py
@@ -147,7 +147,7 @@ def start_server(
app.queue_thread.start()
if interface.save_to is not None: # Used for selenium tests
interface.save_to["port"] = port
-
+
config = uvicorn.Config(app=app, port=port, host=server_name,
log_level="warning")
server = Server(config=config)
| Proper async handling in FastAPI app
**Is your feature request related to a problem? Please describe.**
In reviewing #440, @FarukOzderim brought up the fact that we currently make several synchronous blocking calls from within the FastAPI server. This reduces the benefits we get from migrating to the asynchronous FastAPI server.
**Describe the solution you'd like**
* call `log_feature_analytics()` asynchronously from `api/interpretation` and `api/flag`
* **the biggest one**: calling the prediction function itself asynchronously by spinning up another thread (https://www.aeracode.org/2018/02/19/python-async-simplified/)
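A hedged sketch of the second point using FastAPI's `run_in_threadpool` helper: the synchronous prediction runs in a worker thread so the event loop stays free. The endpoint and payload shape below are simplified placeholders, not gradio's actual route handler.

```python
from fastapi import FastAPI
from fastapi.concurrency import run_in_threadpool

app = FastAPI()

def blocking_predict(data):
    # Stand-in for a CPU-bound model call that would otherwise block the loop.
    return [x[::-1] for x in data]

@app.post("/api/predict/")
async def predict(body: dict):
    result = await run_in_threadpool(blocking_predict, body["data"])
    return {"data": result}
```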
| 2022-01-20T23:04:40 |
||
gradio-app/gradio | 601 | gradio-app__gradio-601 | [
"592"
] | 01f52bbd38cde7150e8e7d3ba2cc8bc1b1db6676 | diff --git a/website/homepage/render_html.py b/website/homepage/render_html.py
--- a/website/homepage/render_html.py
+++ b/website/homepage/render_html.py
@@ -151,14 +151,7 @@ def render_guides():
guide_output = guide_output.replace("<pre>", "<div class='code-block' style='display: flex'><pre>")
guide_output = guide_output.replace("</pre>", f"</pre>{copy_button}</div>")
- output_html = markdown2.markdown(guide_output, extras=["target-blank-links"])
-
- for match in re.findall(r"<h3>([A-Za-z0-9 ]*)<\/h3>", output_html):
- output_html = output_html.replace(
- f"<h3>{match}</h3>",
- f"<h3 id={match.lower().replace(' ', '_')}>{match}</h3>",
- )
-
+ output_html = markdown2.markdown(guide_output, extras=["target-blank-links", "header-ids"])
os.makedirs("generated", exist_ok=True)
os.makedirs(os.path.join("generated", guide["name"]), exist_ok=True)
with open(
| Add automatic anchor links to the different sections of getting started and guides automatically
Many times when I'm responding to issues, I want to link to a specific section in our getting started page. For example, the section related to "State"

We could manually add anchor links, but I'm sure there is a way to automatically turn any header tag into an anchorable link. Sometimes I've seen a little symbol next to the heading that you can click on to get the anchor link for that section, which would be super convenient.
Applies to the getting started as well as the guides.
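A small standalone check of the approach the patch takes: markdown2's `header-ids` extra gives every heading an `id` derived from its text, which is exactly what an anchor link needs (the output shown in the comment is approximate).

```python
import markdown2

html = markdown2.markdown("### Using State\n\nSome text.", extras=["header-ids"])
print(html)  # roughly: <h3 id="using-state">Using State</h3> <p>Some text.</p>
```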
| 2022-02-11T17:15:54 |
||
gradio-app/gradio | 660 | gradio-app__gradio-660 | [
"659"
] | cecb942263b6269e5b2698d5027a2899801560e1 | diff --git a/gradio/networking.py b/gradio/networking.py
--- a/gradio/networking.py
+++ b/gradio/networking.py
@@ -1,181 +1,180 @@
-"""
-Defines helper methods useful for setting up ports, launching servers, and
-creating tunnels.
-"""
-from __future__ import annotations
-
-import http
-import json
-import os
-import socket
-import threading
-import time
-import urllib.parse
-import urllib.request
-from typing import TYPE_CHECKING, Optional, Tuple
-
-import fastapi
-import requests
-import uvicorn
-
-from gradio import queueing
-from gradio.routes import app
-from gradio.tunneling import create_tunnel
-
-if TYPE_CHECKING: # Only import for type checking (to avoid circular imports).
- from gradio import Interface
-
-
-# By default, the local server will try to open on localhost, port 7860.
-# If that is not available, then it will try 7861, 7862, ... 7959.
-INITIAL_PORT_VALUE = int(os.getenv("GRADIO_SERVER_PORT", "7860"))
-TRY_NUM_PORTS = int(os.getenv("GRADIO_NUM_PORTS", "100"))
-LOCALHOST_NAME = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
-GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
-
-
-class Server(uvicorn.Server):
- def install_signal_handlers(self):
- pass
-
- def run_in_thread(self):
- self.thread = threading.Thread(target=self.run, daemon=True)
- self.thread.start()
- while not self.started:
- time.sleep(1e-3)
-
- def close(self):
- self.should_exit = True
- self.thread.join()
-
-
-def get_first_available_port(initial: int, final: int) -> int:
- """
- Gets the first open port in a specified range of port numbers
- Parameters:
- initial: the initial value in the range of port numbers
- final: final (exclusive) value in the range of port numbers, should be greater than `initial`
- Returns:
- port: the first open port in the range
- """
- for port in range(initial, final):
- try:
- s = socket.socket() # create a socket object
- s.bind((LOCALHOST_NAME, port)) # Bind to the port
- s.close()
- return port
- except OSError:
- pass
- raise OSError(
- "All ports from {} to {} are in use. Please close a port.".format(
- initial, final
- )
- )
-
-
-def start_server(
- interface: Interface,
- server_name: Optional[str] = None,
- server_port: Optional[int] = None,
- ssl_keyfile: Optional[str] = None,
- ssl_certfile: Optional[str] = None,
-) -> Tuple[int, str, fastapi.FastAPI, threading.Thread, None]:
- """Launches a local server running the provided Interface
- Parameters:
- interface: The interface object to run on the server
- server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
- server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
- auth: If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.
- ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https.
- ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
- """
- server_name = server_name or LOCALHOST_NAME
- # if port is not specified, search for first available port
- if server_port is None:
- port = get_first_available_port(
- INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
- )
- else:
- try:
- s = socket.socket()
- s.bind((LOCALHOST_NAME, server_port))
- s.close()
- except OSError:
- raise OSError(
- "Port {} is in use. If a gradio.Interface is running on the port, you can close() it or gradio.close_all().".format(
- server_port
- )
- )
- port = server_port
-
- url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
-
- if ssl_keyfile is not None:
- if ssl_certfile is None:
- raise ValueError(
- "ssl_certfile must be provided if ssl_keyfile is provided."
- )
- path_to_local_server = "https://{}:{}/".format(url_host_name, port)
- else:
- path_to_local_server = "http://{}:{}/".format(url_host_name, port)
-
- auth = interface.auth
- if auth is not None:
- if not callable(auth):
- app.auth = {account[0]: account[1] for account in auth}
- else:
- app.auth = auth
- else:
- app.auth = None
- app.interface = interface
- app.cwd = os.getcwd()
- app.favicon_path = interface.favicon_path
- app.tokens = {}
-
- if app.interface.enable_queue:
- if auth is not None or app.interface.encrypt:
- raise ValueError("Cannot queue with encryption or authentication enabled.")
- queueing.init()
- app.queue_thread = threading.Thread(
- target=queueing.queue_thread, args=(path_to_local_server,)
- )
- app.queue_thread.start()
- if interface.save_to is not None: # Used for selenium tests
- interface.save_to["port"] = port
-
- config = uvicorn.Config(
- app=app,
- port=port,
- host=server_name,
- log_level="warning",
- ssl_keyfile=ssl_keyfile,
- ssl_certfile=ssl_certfile,
- )
- server = Server(config=config)
- server.run_in_thread()
- return port, path_to_local_server, app, server
-
-
-def setup_tunnel(local_server_port: int, endpoint: str) -> str:
- response = requests.get(
- endpoint + "/v1/tunnel-request" if endpoint is not None else GRADIO_API_SERVER
- )
- if response and response.status_code == 200:
- try:
- payload = response.json()[0]
- return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
- except Exception as e:
- raise RuntimeError(str(e))
- else:
- raise RuntimeError("Could not get share link from Gradio API Server.")
-
-
-def url_ok(url: str) -> bool:
- try:
- for _ in range(5):
- time.sleep(0.500)
- r = requests.head(url, timeout=3, verify=False)
- if r.status_code in (200, 401, 302): # 401 or 302 if auth is set
- return True
- except (ConnectionError, requests.exceptions.ConnectionError):
- return False
+"""
+Defines helper methods useful for setting up ports, launching servers, and
+creating tunnels.
+"""
+from __future__ import annotations
+
+import os
+import socket
+import threading
+import time
+import warnings
+from typing import TYPE_CHECKING, Optional, Tuple
+
+import fastapi
+import requests
+import uvicorn
+
+from gradio import queueing
+from gradio.routes import app
+from gradio.tunneling import create_tunnel
+
+if TYPE_CHECKING: # Only import for type checking (to avoid circular imports).
+ from gradio import Interface
+
+
+# By default, the local server will try to open on localhost, port 7860.
+# If that is not available, then it will try 7861, 7862, ... 7959.
+INITIAL_PORT_VALUE = int(os.getenv("GRADIO_SERVER_PORT", "7860"))
+TRY_NUM_PORTS = int(os.getenv("GRADIO_NUM_PORTS", "100"))
+LOCALHOST_NAME = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
+GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
+
+
+class Server(uvicorn.Server):
+ def install_signal_handlers(self):
+ pass
+
+ def run_in_thread(self):
+ self.thread = threading.Thread(target=self.run, daemon=True)
+ self.thread.start()
+ while not self.started:
+ time.sleep(1e-3)
+
+ def close(self):
+ self.should_exit = True
+ self.thread.join()
+
+
+def get_first_available_port(initial: int, final: int) -> int:
+ """
+ Gets the first open port in a specified range of port numbers
+ Parameters:
+ initial: the initial value in the range of port numbers
+ final: final (exclusive) value in the range of port numbers, should be greater than `initial`
+ Returns:
+ port: the first open port in the range
+ """
+ for port in range(initial, final):
+ try:
+ s = socket.socket() # create a socket object
+ s.bind((LOCALHOST_NAME, port)) # Bind to the port
+ s.close()
+ return port
+ except OSError:
+ pass
+ raise OSError(
+ "All ports from {} to {} are in use. Please close a port.".format(
+ initial, final
+ )
+ )
+
+
+def start_server(
+ interface: Interface,
+ server_name: Optional[str] = None,
+ server_port: Optional[int] = None,
+ ssl_keyfile: Optional[str] = None,
+ ssl_certfile: Optional[str] = None,
+) -> Tuple[int, str, fastapi.FastAPI, threading.Thread, None]:
+ """Launches a local server running the provided Interface
+ Parameters:
+ interface: The interface object to run on the server
+ server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
+ server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
+ auth: If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.
+ ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https.
+ ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
+ """
+ server_name = server_name or LOCALHOST_NAME
+ # if port is not specified, search for first available port
+ if server_port is None:
+ port = get_first_available_port(
+ INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
+ )
+ else:
+ try:
+ s = socket.socket()
+ s.bind((LOCALHOST_NAME, server_port))
+ s.close()
+ except OSError:
+ raise OSError(
+ "Port {} is in use. If a gradio.Interface is running on the port, you can close() it or gradio.close_all().".format(
+ server_port
+ )
+ )
+ port = server_port
+
+ url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
+
+ if ssl_keyfile is not None:
+ if ssl_certfile is None:
+ raise ValueError(
+ "ssl_certfile must be provided if ssl_keyfile is provided."
+ )
+ path_to_local_server = "https://{}:{}/".format(url_host_name, port)
+ else:
+ path_to_local_server = "http://{}:{}/".format(url_host_name, port)
+
+ auth = interface.auth
+ if auth is not None:
+ if not callable(auth):
+ app.auth = {account[0]: account[1] for account in auth}
+ else:
+ app.auth = auth
+ else:
+ app.auth = None
+ app.interface = interface
+ app.cwd = os.getcwd()
+ app.favicon_path = interface.favicon_path
+ app.tokens = {}
+
+ if app.interface.enable_queue:
+ if auth is not None or app.interface.encrypt:
+ raise ValueError("Cannot queue with encryption or authentication enabled.")
+ queueing.init()
+ app.queue_thread = threading.Thread(
+ target=queueing.queue_thread, args=(path_to_local_server,)
+ )
+ app.queue_thread.start()
+ if interface.save_to is not None: # Used for selenium tests
+ interface.save_to["port"] = port
+
+ config = uvicorn.Config(
+ app=app,
+ port=port,
+ host=server_name,
+ log_level="warning",
+ ssl_keyfile=ssl_keyfile,
+ ssl_certfile=ssl_certfile,
+ )
+ server = Server(config=config)
+ server.run_in_thread()
+ return port, path_to_local_server, app, server
+
+
+def setup_tunnel(local_server_port: int, endpoint: str) -> str:
+ response = requests.get(
+ endpoint + "/v1/tunnel-request" if endpoint is not None else GRADIO_API_SERVER
+ )
+ if response and response.status_code == 200:
+ try:
+ payload = response.json()[0]
+ return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
+ except Exception as e:
+ raise RuntimeError(str(e))
+ else:
+ raise RuntimeError("Could not get share link from Gradio API Server.")
+
+
+def url_ok(url: str) -> bool:
+ try:
+ for _ in range(5):
+ time.sleep(0.500)
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ r = requests.head(url, timeout=3, verify=False)
+ if r.status_code in (200, 401, 302): # 401 or 302 if auth is set
+ return True
+ except (ConnectionError, requests.exceptions.ConnectionError):
+ return False
| Warning pops up with `share=True` with gradio 2.8.0
Launching an Interface with `share=True` causes this:

To reproduce, run
`gr.Interface(lambda x:x, "text", "text").launch(share=True)`
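For reference, a sketch of the approach the patch takes: the warning comes from urllib3 because the share-link health check calls `requests.head(..., verify=False)`, so the fix wraps that call in a `catch_warnings` block.

```python
import warnings
import requests

def head_quietly(url: str) -> int:
    # Suppress the InsecureRequestWarning emitted for verify=False requests.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        return requests.head(url, timeout=3, verify=False).status_code
```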
| 2022-02-18T00:17:43 |
||
gradio-app/gradio | 691 | gradio-app__gradio-691 | [
"663"
] | 80ddcf2e760ba5e99ee5ff07896c8f703a4de55a | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -629,6 +629,7 @@ def launch(
favicon_path: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
+ ssl_keyfile_password: Optional[str] = None,
) -> Tuple[flask.Flask, str, str]:
"""
Launches the webserver that serves the UI for the interface.
@@ -653,6 +654,7 @@ def launch(
favicon_path (str): If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.
ssl_keyfile (str): If a path to a file is provided, will use this as the private key file to create a local server running on https.
ssl_certfile (str): If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
+ ssl_keyfile_password (str): If a password is provided, will use this with the ssl certificate for https.
Returns:
app (flask.Flask): Flask app object
path_to_local_server (str): Locally accessible link
@@ -694,7 +696,12 @@ def launch(
cache_interface_examples(self)
server_port, path_to_local_server, app, server = networking.start_server(
- self, server_name, server_port, ssl_keyfile, ssl_certfile
+ self,
+ server_name,
+ server_port,
+ ssl_keyfile,
+ ssl_certfile,
+ ssl_keyfile_password,
)
self.local_url = path_to_local_server
diff --git a/gradio/networking.py b/gradio/networking.py
--- a/gradio/networking.py
+++ b/gradio/networking.py
@@ -76,6 +76,7 @@ def start_server(
server_port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
+ ssl_keyfile_password: Optional[str] = None,
) -> Tuple[int, str, fastapi.FastAPI, threading.Thread, None]:
"""Launches a local server running the provided Interface
Parameters:
@@ -85,6 +86,7 @@ def start_server(
auth: If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.
ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https.
ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
+ ssl_keyfile_password (str): If a password is provided, will use this with the ssl certificate for https.
"""
server_name = server_name or LOCALHOST_NAME
# if port is not specified, search for first available port
@@ -147,6 +149,7 @@ def start_server(
log_level="warning",
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
+ ssl_keyfile_password=ssl_keyfile_password,
)
server = Server(config=config)
server.run_in_thread()
| About the `ssl_keyfile` parameter in version 2.8.0
I'm working on serving my app over https with gradio. How can I provide the SSL key password?
| Thanks for the catch @byeolkady. It looks like we need to add support for that so that it gets passed into the server..
Besides
`ssl_keyfile_password`
Do you need any of these parameters?
`ssl_version,`
`ssl_cert_reqs,`
`ssl_ca_certs,`
`ssl_ciphers`
First of all, I think I need only the password-related part. Then will that part be updated to version 2.9.0? | 2022-02-21T12:54:08 |
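A usage sketch of the parameters this PR wires through to uvicorn; the file paths and password are placeholders.

```python
import gradio as gr

gr.Interface(lambda x: x, "text", "text").launch(
    ssl_keyfile="path/to/key.pem",
    ssl_certfile="path/to/cert.pem",
    ssl_keyfile_password="my-key-password",  # new parameter added by this PR
)
```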
|
gradio-app/gradio | 710 | gradio-app__gradio-710 | [
"371"
] | 36045f5a95f940579619bceba9473cc4aeea69f5 | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -9,8 +9,9 @@
import secrets
import traceback
import urllib
-from typing import List, Optional, Type
+from typing import Any, List, Optional, Type
+import orjson
import pkg_resources
import uvicorn
from fastapi import Depends, FastAPI, HTTPException, Request, status
@@ -19,6 +20,7 @@
from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.templating import Jinja2Templates
+from jinja2.exceptions import TemplateNotFound
from starlette.responses import RedirectResponse
from gradio import encryptor, queueing, utils
@@ -37,7 +39,15 @@
VERSION
)
-app = FastAPI()
+
+class ORJSONResponse(JSONResponse):
+ media_type = "application/json"
+
+ def render(self, content: Any) -> bytes:
+ return orjson.dumps(content)
+
+
+app = FastAPI(default_response_class=ORJSONResponse)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
@@ -108,9 +118,15 @@ def main(request: Request, user: str = Depends(get_current_user)):
else:
config = {"auth_required": True, "auth_message": app.interface.auth_message}
- return templates.TemplateResponse(
- "frontend/index.html", {"request": request, "config": config}
- )
+ try:
+ return templates.TemplateResponse(
+ "frontend/index.html", {"request": request, "config": config}
+ )
+ except TemplateNotFound:
+ raise ValueError(
+ "Did you install Gradio from source files? You need to build "
+ "the frontend by running /scripts/build_frontend.sh"
+ )
@app.get("/config/", dependencies=[Depends(login_check)])
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@
"markdown-it-py[linkify,plugins]",
"matplotlib",
"numpy",
+ "orjson",
"pandas",
"paramiko",
"pillow",
| BUG: DataFrame output cannot handle null values
#### Result:
pandas DataFrame doesn't display if more than 10 cells are to be shown initially
#### Expectation:
The DataFrame will be displayed
#### Versions:
gradio 2.2.15
Python 3.9.6
#### Reproducible Example
```
import gradio as gr
import pandas as pd
import plotly.express as px
def show_pens(alpha):
df_pens = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/penguins.csv')
print(df_pens)
return df_pens.iloc[:5, :3]
iface = gr.Interface(
fn=show_pens,
inputs=['text'],
outputs=[gr.outputs.Dataframe()],
description="Table of Palmer Penguins"
)
iface.launch()
```
### Screenshot of failing output
<img width="1312" alt="Screen Shot 2021-11-16 at 7 59 47 AM" src="https://user-images.githubusercontent.com/7703961/141990015-616ed983-0c78-4b2c-9b35-9b031d7ec735.png">
### Console error screenshot
<img width="544" alt="Screen Shot 2021-11-16 at 8 05 56 AM" src="https://user-images.githubusercontent.com/7703961/141990591-d00b6bcd-7492-4628-ad40-47264e638f2a.png">
### Working example
Change the return statement to ` return df_pens.iloc[:3, :3]`
### Screenshot of working output
<img width="1286" alt="Screen Shot 2021-11-16 at 8 00 24 AM" src="https://user-images.githubusercontent.com/7703961/141990208-e9d1e011-c9e4-4117-a4ce-15f4099975b7.png">
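A standalone illustration of why NaN/null cells break the response with the standard library encoder, and why switching to orjson (as the patch does) helps: `json` emits bare `NaN`, which is not valid JSON and fails in the browser's `JSON.parse`, while orjson serializes it as `null`.

```python
import json
import orjson

row = [1.0, float("nan")]
print(json.dumps(row))             # '[1.0, NaN]'  <- not valid JSON
print(orjson.dumps(row).decode())  # '[1.0,null]'
```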
| Interesting, @aliabid94 can you look into this?
Will take a look, thanks @discdiver
Ah, seems like there is a null value in the dataframe that's throwing the renderer off - it's not related to the size of the output. See image:

Fixing the null value issue now.
Thanks. Yep, there are a few nulls. So gradio can't display DataFrames with
null values? If that's the case, probably worth adding to the docs.
FWIW, there's probably a fix as that's not an issue on Streamlit.
Yes there's a fix in the works, will be in the next release. Will update here when it's in.
Thanks!
| 2022-02-22T20:32:35 |
|
gradio-app/gradio | 712 | gradio-app__gradio-712 | [
"707"
] | 93f372d98875c7b736c2a29e157f9c301bdbd2c1 | diff --git a/gradio/strings.py b/gradio/strings.py
--- a/gradio/strings.py
+++ b/gradio/strings.py
@@ -24,11 +24,12 @@
"INLINE_DISPLAY_BELOW": "Interface loading below...",
"MEDIA_PERMISSIONS_IN_COLAB": "Your interface requires microphone or webcam permissions - this may cause issues in Colab. Use the External URL in case of issues.",
"TIPS": [
- "You can add authentication to your app with the auth= kwarg in the launch command; for example: gr.Interface(...).launch(auth=('username', 'password'))",
- "Let users specify why they flagged input with the flagging_options= kwarg; for example: gr.Interface(..., flagging_options=['too slow', 'incorrect output', 'other'])",
- "You can show or hide the buttons for flagging, screenshots, and interpretation with the allow_*= kwargs; for example: gr.Interface(..., allow_screenshot=True, allow_flagging=False)",
+ "You can add authentication to your app with the `auth=` kwarg in the `launch()` command; for example: `gr.Interface(...).launch(auth=('username', 'password'))`",
+ "Let users specify why they flagged input with the `flagging_options=` kwarg; for example: `gr.Interface(..., flagging_options=['too slow', 'incorrect output', 'other'])`",
+ "You can show or hide the button for flagging with the `allow_flagging=` kwarg; for example: gr.Interface(..., allow_flagging=False)",
"The inputs and outputs flagged by the users are stored in the flagging directory, specified by the flagging_dir= kwarg. You can view this data through the interface by setting the examples= kwarg to the flagging directory; for example gr.Interface(..., examples='flagged')",
- "You can add a title and description to your interface using the title= and description= kwargs. The article= kwarg can be used to add markdown or HTML under the interface; for example gr.Interface(..., title='My app', description='Lorem ipsum')",
+ "You can add a title and description to your interface using the `title=` and `description=` kwargs. The `article=` kwarg can be used to add a description under the interface; for example gr.Interface(..., title='My app', description='Lorem ipsum'). Try using Markdown!",
+ "For a classification or regression model, set `interpretation='default'` to see why the model made a prediction.",
],
}
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -156,9 +156,9 @@ def readme_to_html(article: str) -> str:
def show_tip(interface: Interface) -> None:
- # Only show tip every other use.
- if interface.show_tips and random.random() < 0.5:
- print(random.choice(gradio.strings.en.TIPS))
+ if interface.show_tips and random.random() < 1.5:
+ tip: str = random.choice(gradio.strings.en["TIPS"])
+ print(f"Tip: {tip}")
def launch_counter() -> None:
| `Interface.launch()` with `show_tips=True` yields error
### Problem:
Using [this gradio colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing#scrollTo=BtS4nqLIW-dv), I added a few modifications, notably `show_tips=True` to the `launch` method of gradio.Interface.
Without the try/except block (as in the `main1` function below), this error shows up:
```
/usr/local/lib/python3.7/dist-packages/gradio/utils.py in show_tip(interface)
159 # Only show tip every other use.
160 if interface.show_tips and random.random() < 0.5:
--> 161 print(random.choice(gradio.strings.en.TIPS))
162
163
AttributeError: 'dict' object has no attribute 'TIPS'
```
### Reproducible example:
```
def greet(name):
return f"Hello {name}!" # py3.7!
# Original code in colab:
#gradio.Interface(greet, "text", "text").launch(share=True)
# My modifications:
iface = gradio.Interface(fn=greet,
inputs=gradio.inputs.Textbox(lines=2,
placeholder="Name Here"),
outputs="text",
examples=[['Tiger'],['Puma']]
)
# Functions to use to yield the error or trap it:
def main1():
iface.launch(show_tips=True)
def main2():
try:
iface.launch(show_tips=True)
except AttributeError:
pass
# Uncomment and run (possibly twice, re: traceback line 160) to get the error:
#main1()
# No issue when exception is trapped:
main2()
```
#### Device information:
- OS: Windows 10
- Browser: chrome
- Gradio version: 2.8.2
#### Additional context
- Context: gradio colab
| 2022-02-22T21:09:30 |
||
gradio-app/gradio | 724 | gradio-app__gradio-724 | [
"454"
] | a2b89ab2d249fe7434d180c2d0a1552003fd8a90 | diff --git a/demo/diff_texts/run.py b/demo/diff_texts/run.py
--- a/demo/diff_texts/run.py
+++ b/demo/diff_texts/run.py
@@ -19,7 +19,7 @@ def diff_texts(text1, text2):
),
gr.inputs.Textbox(lines=3, default="The fast brown fox jumps over lazy dogs."),
],
- gr.outputs.HighlightedText(),
+ gr.outputs.HighlightedText(color_map={'+': 'green', '-': 'pink'}),
)
if __name__ == "__main__":
iface.launch()
| diff --git a/test/golden/diff_texts/magic_trick.png b/test/golden/diff_texts/magic_trick.png
Binary files a/test/golden/diff_texts/magic_trick.png and b/test/golden/diff_texts/magic_trick.png differ
| Display bug with the version gradio 2.7.0 on HighlightedText
**Describe the bug**
I described the bug with a notebook and screenshots in the Hugging Face forum: https://discuss.huggingface.co/t/how-to-install-a-specific-version-of-gradio-in-spaces/13552
**To Reproduce**
Run the notebook [diff_texts.ipynb](https://colab.research.google.com/drive/1NPdrvEVzt03GUrWVHGQGDx4TB0ZLPus6?usp=sharing).
**Screenshots**
With gradio 2.7.0

With gradio 2.6.4

| Thank you for raising the issue! We are fixing this issue right now
Thanks again for creating the issue. In case you need a workaround while we fix the issue, you can just run `gr.outputs.HighlightedText()` without passing in a custom `color_map` and it should work fine
The issue happens when you pass in a custom `color_map` to `gr.outputs.HighlightedText()`.
**Working example**:
```python
def diff_texts(text1, text2):
d = Differ()
return [
(token[2:], token[0] if token[0] != " " else None)
for token in d.compare(text1, text2)
]
iface = gr.Interface(
diff_texts,
[
gr.inputs.Textbox(
lines=3, default="The quick brown fox jumped over the lazy dogs."
),
gr.inputs.Textbox(lines=3, default="The fast brown fox jumps over lazy dogs."),
],
gr.outputs.HighlightedText(),
)
```

**Broken example**:
```python
def diff_texts(text1, text2):
d = Differ()
return [
(token[2:], token[0] if token[0] != " " else None)
for token in d.compare(text1, text2)
]
iface = gr.Interface(
diff_texts,
[
gr.inputs.Textbox(
lines=3, default="The quick brown fox jumped over the lazy dogs."
),
gr.inputs.Textbox(lines=3, default="The fast brown fox jumps over lazy dogs."),
],
gr.outputs.HighlightedText(color_map={'+': 'brown', '-': 'pink'}),
)
```
in which case the words that being highlighted no longer appear:

| 2022-02-23T15:19:56 |
gradio-app/gradio | 739 | gradio-app__gradio-739 | [
"734"
] | cc700b80042e576669979eda8ef4531983b62677 | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -123,7 +123,7 @@ def __init__(
css: Optional[str] = None,
height=None,
width=None,
- allow_screenshot: bool = True,
+ allow_screenshot: bool = False,
allow_flagging: Optional[str] = None,
flagging_options: List[str] = None,
encrypt=None,
@@ -135,7 +135,7 @@ def __init__(
enable_queue=None,
api_mode=None,
flagging_callback: FlaggingCallback = CSVLogger(),
- ):
+ ): # TODO: (faruk) Let's remove depreceated parameters in the version 3.0.0
"""
Parameters:
fn (Union[Callable, List[Callable]]): the function to wrap an interface around.
@@ -155,7 +155,7 @@ def __init__(
thumbnail (str): path to image or src to use as display picture for models listed in gradio.app/hub
theme (str): Theme to use - one of "default", "huggingface", "seafoam", "grass", "peach". Add "dark-" prefix, e.g. "dark-peach" for dark theme (or just "dark" for the default dark theme).
css (str): custom css or path to custom css file to use with interface.
- allow_screenshot (bool): if False, users will not see a button to take a screenshot of the interface.
+ allow_screenshot (bool): DEPRECATED if False, users will not see a button to take a screenshot of the interface.
allow_flagging (str): one of "never", "auto", or "manual". If "never" or "auto", users will not see a button to flag an input and output. If "manual", users will see a button to flag. If "auto", every prediction will be automatically flagged. If "manual", samples are flagged when the user clicks flag button. Can be set with environmental variable GRADIO_ALLOW_FLAGGING.
flagging_options (List[str]): if provided, allows user to select from the list of options when flagging. Only applies if allow_flagging is "manual".
encrypt (bool): DEPRECATED. If True, flagged data will be encrypted by key provided by creator at launch
@@ -217,6 +217,11 @@ def __init__(
"The `verbose` parameter in the `Interface`"
"is deprecated and has no effect."
)
+ if allow_screenshot:
+ warnings.warn(
+ "The `allow_screenshot` parameter in the `Interface`"
+ "is deprecated and has no effect."
+ )
self.status = "OFF"
self.live = live
@@ -277,6 +282,7 @@ def clean_html(raw_html):
self.thumbnail = thumbnail
theme = theme if theme is not None else os.getenv("GRADIO_THEME", "default")
+ self.is_space = True if os.getenv("SYSTEM") == "spaces" else False
DEPRECATED_THEME_MAP = {
"darkdefault": "default",
"darkhuggingface": "dark-huggingface",
@@ -731,6 +737,8 @@ def launch(
share = True
if share:
+ if self.is_space:
+ raise RuntimeError("Share is not supported when you are in Spaces")
try:
share_url = networking.setup_tunnel(server_port, private_endpoint)
self.share_url = share_url
@@ -791,6 +799,7 @@ def launch(
"api_mode": self.api_mode,
"server_name": server_name,
"server_port": server_port,
+ "is_spaces": self.is_space,
}
if self.analytics_enabled:
utils.launch_analytics(data)
| Share Breaks In Spaces
### Describe the bug
share=True usage in launch gives error in spaces
### Reproduction
https://huggingface.co/spaces/ruhwang2001/wonder-app/blob/main/app.py
`interface.launch(debug=True, share=True)`
### Screenshot
_No response_
### Logs
```shell
Running on local URL: http://localhost:7860/
█
*** Failed to connect to ec2.gradio.app:22: [Errno 110] Connection timed out
```
### System Info
```shell
Spaces
```
### Severity
critical
| @aliabid94 @aliabd @abidlabs As far as I can see in the codebase we are not aware if we are on HF Spaces in the interface class.
Shall we have an environment variable in Spaces which will notify us that we are running on HF Spaces? Just like `utils.colab_check()`.
That makes sense. We can coordinate with the Spaces team to set up that environmental variable.
My suggestion would be that if this environmental variable is set **and `share=True`**, then we print a helpful warning to the user saying that for Spaces, `share` should be set to `False`.
(Instead of overriding `share` as I don't think we should override an explicit parameter with an implicit environmental variable)
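A minimal sketch of that suggestion, assuming the Spaces environment exposes `SYSTEM=spaces` (which is what the patch above ends up checking); the helper names here are placeholders, not the final API:
```python
import os
import warnings


def is_spaces() -> bool:
    # Hugging Face Spaces sets SYSTEM=spaces inside the container
    return os.getenv("SYSTEM") == "spaces"


def warn_if_sharing_on_spaces(share: bool) -> None:
    # Warn instead of silently overriding an explicit share=True
    if share and is_spaces():
        warnings.warn(
            "share=True has no effect on Hugging Face Spaces; "
            "the app is already publicly reachable at its Space URL."
        )
```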
Gonna make use of these env variables for this issue.
good point! cc @cbensimon | 2022-02-25T08:26:09 |
|
gradio-app/gradio | 752 | gradio-app__gradio-752 | [
"751"
] | 907839926b3298cc3513920103def320e8f9d9e6 | diff --git a/demo/hello_world/run.py b/demo/hello_world/run.py
--- a/demo/hello_world/run.py
+++ b/demo/hello_world/run.py
@@ -7,4 +7,4 @@ def greet(name):
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
if __name__ == "__main__":
- iface.launch()
+ iface.launch();
diff --git a/demo/hello_world_2/run.py b/demo/hello_world_2/run.py
--- a/demo/hello_world_2/run.py
+++ b/demo/hello_world_2/run.py
@@ -11,4 +11,5 @@ def greet(name):
outputs="text",
)
if __name__ == "__main__":
- iface.launch()
+ app, path_to_local_server, share_url = iface.launch()
+
diff --git a/demo/hello_world_3/run.py b/demo/hello_world_3/run.py
--- a/demo/hello_world_3/run.py
+++ b/demo/hello_world_3/run.py
@@ -14,4 +14,5 @@ def greet(name, is_morning, temperature):
outputs=["text", "number"],
)
if __name__ == "__main__":
- iface.launch()
+ app, path_to_local_server, share_url = iface.launch()
+
| Update Launch Getting Started Guides
Usage of `iface.launch()` prints out `(<fastapi.applications.FastAPI at 0x7fef0dbbad10>,` in Google Colab, which does not look good.
Let's update the first `launch` examples in the getting_started guides so that people adopt usages that avoid this.
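For reference, these are the two patterns the patch above switches the demos to, so that the returned tuple is not echoed in a notebook cell (the `greet` demo is just a stand-in):
```python
import gradio as gr


def greet(name):
    return "Hello " + name + "!"


iface = gr.Interface(fn=greet, inputs="text", outputs="text")

# Option 1: a trailing semicolon keeps Colab/Jupyter from printing the return value
iface.launch();

# Option 2 (instead of option 1): unpack the return value so nothing is echoed
# app, path_to_local_server, share_url = iface.launch()
```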
| 2022-02-28T14:52:04 |
||
gradio-app/gradio | 782 | gradio-app__gradio-782 | [
"777"
] | 9e2cac6e4c1ed9b62968060bd5bf63e61f4e9c43 | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -28,7 +28,7 @@
from gradio.outputs import State as o_State # type: ignore
from gradio.outputs import get_output_instance
from gradio.process_examples import load_from_cache, process_example
-from gradio.routes import PredictRequest
+from gradio.routes import PredictBody
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
import flask
@@ -559,7 +559,7 @@ def run_prediction(
else:
return predictions
- def process_api(self, data: PredictRequest, username: str = None) -> Dict[str, Any]:
+ def process_api(self, data: PredictBody, username: str = None) -> Dict[str, Any]:
flag_index = None
if data.example_id is not None:
if self.cache_examples:
diff --git a/gradio/queueing.py b/gradio/queueing.py
--- a/gradio/queueing.py
+++ b/gradio/queueing.py
@@ -7,6 +7,8 @@
import requests
+from gradio.routes import QueuePushBody
+
DB_FILE = "gradio_queue.db"
@@ -106,8 +108,9 @@ def pop() -> Tuple[int, str, Dict, str]:
return result[0], result[1], json.loads(result[2]), result[3]
-def push(input_data: Dict, action: str) -> Tuple[str, int]:
- input_data = json.dumps(input_data)
+def push(body: QueuePushBody) -> Tuple[str, int]:
+ action = body.action
+ input_data = json.dumps({"data": body.data})
hash = generate_hash()
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -66,13 +66,37 @@ def render(self, content: Any) -> bytes:
###########
-class PredictRequest(BaseModel):
+class PredictBody(BaseModel):
session_hash: Optional[str]
example_id: Optional[int]
- data: Any
+ data: List[Any]
state: Optional[Any]
+class FlagData(BaseModel):
+ input_data: List[Any]
+ output_data: List[Any]
+ flag_option: Optional[str]
+ flag_index: Optional[int]
+
+
+class FlagBody(BaseModel):
+ data: FlagData
+
+
+class InterpretBody(BaseModel):
+ data: List[Any]
+
+
+class QueueStatusBody(BaseModel):
+ hash: str
+
+
+class QueuePushBody(BaseModel):
+ action: str
+ data: Any
+
+
###########
# Auth
###########
@@ -224,7 +248,7 @@ def api_docs(request: Request):
@app.post("/api/predict/", dependencies=[Depends(login_check)])
-async def predict(body: PredictRequest, username: str = Depends(get_current_user)):
+async def predict(body: PredictBody, username: str = Depends(get_current_user)):
if app.launchable.stateful:
session_hash = body.session_hash
state = app.state_holder.get(
@@ -247,29 +271,26 @@ async def predict(body: PredictRequest, username: str = Depends(get_current_user
@app.post("/api/flag/", dependencies=[Depends(login_check)])
-async def flag(request: Request, username: str = Depends(get_current_user)):
+async def flag(body: FlagBody, username: str = Depends(get_current_user)):
if app.launchable.analytics_enabled:
await utils.log_feature_analytics(app.launchable.ip_address, "flag")
- body = await request.json()
- data = body["data"]
await run_in_threadpool(
app.launchable.flagging_callback.flag,
app.launchable,
- data["input_data"],
- data["output_data"],
- flag_option=data.get("flag_option"),
- flag_index=data.get("flag_index"),
+ body.data.input_data,
+ body.data.output_data,
+ flag_option=body.data.flag_option,
+ flag_index=body.data.flag_index,
username=username,
)
return {"success": True}
@app.post("/api/interpret/", dependencies=[Depends(login_check)])
-async def interpret(request: Request):
+async def interpret(body: InterpretBody):
if app.launchable.analytics_enabled:
await utils.log_feature_analytics(app.launchable.ip_address, "interpret")
- body = await request.json()
- raw_input = body["data"]
+ raw_input = body.data
interpretation_scores, alternative_outputs = await run_in_threadpool(
app.launchable.interpret, raw_input
)
@@ -280,18 +301,14 @@ async def interpret(request: Request):
@app.post("/api/queue/push/", dependencies=[Depends(login_check)])
-async def queue_push(request: Request):
- body = await request.json()
- action = body["action"]
- job_hash, queue_position = queueing.push(body, action)
+async def queue_push(body: QueuePushBody):
+ job_hash, queue_position = queueing.push(body)
return {"hash": job_hash, "queue_position": queue_position}
@app.post("/api/queue/status/", dependencies=[Depends(login_check)])
-async def queue_status(request: Request):
- body = await request.json()
- hash = body["hash"]
- status, data = queueing.get_status(hash)
+async def queue_status(body: QueueStatusBody):
+ status, data = queueing.get_status(body.hash)
return {"status": status, "data": data}
| diff --git a/test/test_external.py b/test/test_external.py
--- a/test/test_external.py
+++ b/test/test_external.py
@@ -214,7 +214,7 @@ def test_numerical_to_label_space(self):
def test_speech_recognition_model(self):
interface_info = gr.external.load_interface(
- "models/jonatasgrosman/wav2vec2-large-xlsr-53-english"
+ "models/facebook/wav2vec2-base-960h"
)
io = gr.Interface(**interface_info)
io.api_mode = True
diff --git a/test/test_queuing.py b/test/test_queuing.py
--- a/test/test_queuing.py
+++ b/test/test_queuing.py
@@ -4,6 +4,7 @@
import unittest
from gradio import queueing
+from gradio.routes import QueuePushBody
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -30,9 +31,11 @@ def test_hashing(self):
queueing.close()
def test_push_pop_status(self):
- hash1, position = queueing.push({"data": "test1"}, "predict")
+ request = QueuePushBody(data="test1", action="predict")
+ hash1, position = queueing.push(request)
self.assertEquals(position, 0)
- hash2, position = queueing.push({"data": "test2"}, "predict")
+ request = QueuePushBody(data="test2", action="predict")
+ hash2, position = queueing.push(request)
self.assertEquals(position, 1)
status, position = queueing.get_status(hash2)
self.assertEquals(status, "QUEUED")
@@ -43,8 +46,9 @@ def test_push_pop_status(self):
self.assertEquals(action, "predict")
def test_jobs(self):
- hash1, _ = queueing.push({"data": "test1"}, "predict")
- hash2, position = queueing.push({"data": "test1"}, "predict")
+ request = QueuePushBody(data="test1", action="predict")
+ hash1, _ = queueing.push(request)
+ hash2, position = queueing.push(request)
self.assertEquals(position, 1)
queueing.start_job(hash1)
| Update fastapi routes to use pydantic data models
There are several routes where we manually convert the request to a dictionary using e.g.:
```python
body = await request.json()
```
It is better to use FastAPI's built-in support for pydantic data models, which allows automatic validation & typing.
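As a rough illustration of the pattern (plain FastAPI + pydantic, not Gradio's actual route code): declaring the request body as a pydantic model lets FastAPI parse, validate, and type it automatically, with no manual `await request.json()`:
```python
from typing import Any, List, Optional

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class PredictBody(BaseModel):
    session_hash: Optional[str] = None
    data: List[Any]


@app.post("/api/predict/")
async def predict(body: PredictBody):
    # body has already been parsed and validated by FastAPI
    return {"data": body.data, "session_hash": body.session_hash}
```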
| 2022-03-06T02:57:43 |
|
gradio-app/gradio | 826 | gradio-app__gradio-826 | [
"825"
] | 10e1f7c8fffb0db80a404ae0e4abb12c0496d9e1 | diff --git a/gradio/flagging.py b/gradio/flagging.py
--- a/gradio/flagging.py
+++ b/gradio/flagging.py
@@ -330,7 +330,7 @@ def flag(
for i, component in enumerate(interface.input_components):
component_label = interface.config["input_components"][i][
"label"
- ] or "Input_{}".format(i)
+ ] or "input_{}".format(i)
headers.append(component_label)
infos["flagged"]["features"][component_label] = {
"dtype": "string",
@@ -348,7 +348,7 @@ def flag(
for i, component in enumerate(interface.output_components):
component_label = interface.config["output_components"][i][
"label"
- ] or "Output_{}".format(i)
+ ] or "output_{}".format(i)
headers.append(component_label)
infos["flagged"]["features"][component_label] = {
"dtype": "string",
@@ -377,7 +377,7 @@ def flag(
for i, component in enumerate(interface.input_components):
label = interface.config["input_components"][i][
"label"
- ] or "Input_{}".format(i)
+ ] or "input_{}".format(i)
filepath = component.save_flagged(
self.dataset_dir, label, input_data[i], None
)
@@ -389,7 +389,7 @@ def flag(
for i, component in enumerate(interface.output_components):
label = interface.config["output_components"][i][
"label"
- ] or "Output_{}".format(i)
+ ] or "output_{}".format(i)
filepath = (
component.save_flagged(
self.dataset_dir, label, output_data[i], None
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -215,29 +215,34 @@ def get_config_file(interface: Interface) -> Dict[str, Any]:
}
try:
param_names = inspect.getfullargspec(interface.predict[0])[0]
- for iface, param in zip(config["input_components"], param_names):
- if not iface["label"]:
- iface["label"] = param.replace("_", " ")
- for i, iface in enumerate(config["output_components"]):
+ for index, component in enumerate(config["input_components"]):
+ if not component["label"]:
+ if index < len(param_names):
+ component["label"] = param_names[index].replace("_", " ")
+ else:
+ component["label"] = (
+ f"input {index + 1}"
+ if len(config["input_components"]) > 1
+ else "input"
+ )
+ for index, component in enumerate(config["output_components"]):
outputs_per_function = int(
len(interface.output_components) / len(interface.predict)
)
- function_index = i // outputs_per_function
- component_index = i - function_index * outputs_per_function
- ret_name = (
- "Output " + str(component_index + 1)
- if outputs_per_function > 1
- else "Output"
- )
- if iface["label"] is None:
- iface["label"] = ret_name
+ function_index = index // outputs_per_function
+ component_index = index - function_index * outputs_per_function
+ if component["label"] is None:
+ component["label"] = (
+ f"output {component_index + 1}"
+ if outputs_per_function > 1
+ else "output"
+ )
if len(interface.predict) > 1:
- iface["label"] = (
+ component["label"] = (
interface.function_names[function_index].replace("_", " ")
+ ": "
- + iface["label"]
+ + component["label"]
)
-
except ValueError:
pass
if interface.examples is not None:
| A few small component label issues
### Describe the bug
As I was creating demos for the course, I noticed some small issues related to the labels of the components:
* The label of an input component is sometimes `null` (by default, it should be "Input")
* The default input component label ("Input") and output component label ("Output") should both be lower-cased to be consistent e.g. with parameters, which are typically lower case
### Reproduction
For one such example, run:
```python
gr.Interface.load("spaces/abidlabs/remove-bg", inputs="webcam", title="Remove your webcam background!").launch()
```
### Screenshot

### Logs
_No response_
### System Info
```shell
2.8.10
```
### Severity
annoyance
| 2022-03-16T23:10:38 |
||
gradio-app/gradio | 844 | gradio-app__gradio-844 | [
"723"
] | e8e8439cb1fe773e4ae83abe37afb7898550084e | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -19,77 +19,33 @@ def __init__(self):
Context.root_block.blocks[self._id] = self
self.events = []
- def click(self, fn, inputs, outputs):
- if not isinstance(inputs, list):
- inputs = [inputs]
- if not isinstance(outputs, list):
- outputs = [outputs]
- Context.root_block.fns.append(fn)
- Context.root_block.dependencies.append(
- {
- "id": len(Context.root_block.dependencies),
- "targets": [self._id],
- "trigger": "click",
- "inputs": [block._id for block in inputs],
- "outputs": [block._id for block in outputs],
- }
- )
-
- def change(
- self, fn: str, inputs: List["Component"], outputs: List["Component"]
+ def set_event_trigger(
+ self,
+ event_name: str,
+ fn: Callable,
+ inputs: List[Component],
+ outputs: List[Component],
) -> None:
"""
- Adds change event to the component's dependencies.
-
- Whenever the component changes the function is triggered.
-
+ Adds an event to the component's dependencies.
Parameters:
- fn: function name
+ event_name: event name
+ fn: Callable function
inputs: input list
outputs: output list
-
Returns: None
-
"""
+ # Support for singular parameter
if not isinstance(inputs, list):
inputs = [inputs]
if not isinstance(outputs, list):
outputs = [outputs]
- Context.root_block.fns.append(fn)
- Context.root_block.dependencies.append(
- {
- "targets": [self._id],
- "trigger": "change",
- "inputs": [block._id for block in inputs],
- "outputs": [block._id for block in outputs],
- }
- )
- def save(
- self, fn: str, inputs: List["Component"], outputs: List["Component"]
- ) -> None:
- """
- Adds save event to the component's dependencies.
-
- Whenever the component is saved the function is triggered.
-
- Parameters:
- fn: function name
- inputs: input list
- outputs: output list
-
- Returns: None
-
- """
- if not isinstance(inputs, list):
- inputs = [inputs]
- if not isinstance(outputs, list):
- outputs = [outputs]
Context.root_block.fns.append(fn)
Context.root_block.dependencies.append(
{
"targets": [self._id],
- "trigger": "save",
+ "trigger": event_name,
"inputs": [block._id for block in inputs],
"outputs": [block._id for block in outputs],
}
@@ -120,7 +76,15 @@ def get_template_context(self):
class Tabs(BlockContext):
- pass
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
class TabItem(BlockContext):
@@ -131,6 +95,16 @@ def __init__(self, label):
def get_template_context(self):
return {"label": self.label}
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Blocks(Launchable, BlockContext):
def __init__(self, theme="default"):
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -9,7 +9,7 @@
import tempfile
import warnings
from types import ModuleType
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
@@ -358,6 +358,26 @@ def deserialize(self, x):
"""
return x
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def submit(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("submit", fn, inputs, outputs)
+
class Number(Component):
"""
@@ -473,6 +493,26 @@ def deserialize(self, y):
"""
return y
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def submit(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("submit", fn, inputs, outputs)
+
class Slider(Component):
"""
@@ -584,6 +624,16 @@ def deserialize(self, y):
"""
return y
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Checkbox(Component):
"""
@@ -667,6 +717,16 @@ def deserialize(self, x):
"""
return x
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class CheckboxGroup(Component):
"""
@@ -784,6 +844,16 @@ def deserialize(self, x):
"""
return x
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Radio(Component):
"""
@@ -876,6 +946,16 @@ def deserialize(self, x):
"""
return x
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Dropdown(Radio):
"""
@@ -1211,6 +1291,36 @@ def deserialize(self, x):
y = processing_utils.decode_base64_to_file(x).name
return y
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def edit(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("edit", fn, inputs, outputs)
+
+ def clear(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("clear", fn, inputs, outputs)
+
class Video(Component):
"""
@@ -1323,6 +1433,56 @@ def postprocess(self, y):
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def clear(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("clear", fn, inputs, outputs)
+
+ def play(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("play", fn, inputs, outputs)
+
+ def pause(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("pause", fn, inputs, outputs)
+
+ def stop(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("stop", fn, inputs, outputs)
+
class Audio(Component):
"""
@@ -1565,6 +1725,66 @@ def postprocess(self, y):
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def edit(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("edit", fn, inputs, outputs)
+
+ def clear(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("clear", fn, inputs, outputs)
+
+ def play(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("play", fn, inputs, outputs)
+
+ def pause(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("pause", fn, inputs, outputs)
+
+ def stop(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("stop", fn, inputs, outputs)
+
class File(Component):
"""
@@ -1683,6 +1903,26 @@ def postprocess(self, y):
"data": processing_utils.encode_file_to_base64(y),
}
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+ def clear(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("clear", fn, inputs, outputs)
+
class Dataframe(Component):
"""
@@ -1848,6 +2088,16 @@ def postprocess(self, y):
+ ". Please choose from: 'pandas', 'numpy', 'array'."
)
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Timeseries(Component):
"""
@@ -1937,6 +2187,16 @@ def postprocess(self, y):
"""
return {"headers": y.columns.values.tolist(), "data": y.values.tolist()}
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class State(Component):
"""
@@ -2071,6 +2331,35 @@ def restore_flagged(self, dir, data, encryption_key):
except ValueError:
return data
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
+
+class KeyValues(Component):
+ """
+ Component displays a table representing values for multiple fields.
+ Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]
+ Demos: text_analysis
+ """
+
+ def __init__(self, default: str = " ", *, label: Optional[str] = None, **kwargs):
+ """
+ Parameters:
+ default (str): IGNORED
+ label (str): component name in interface.
+ """
+ raise DeprecationWarning(
+ "The KeyValues component is deprecated. Please use the DataFrame or JSON "
+ "components instead."
+ )
+
class HighlightedText(Component):
"""
@@ -2129,6 +2418,16 @@ def save_flagged(self, dir, label, data, encryption_key):
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class JSON(Component):
"""
@@ -2171,6 +2470,16 @@ def save_flagged(self, dir, label, data, encryption_key):
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class HTML(Component):
"""
@@ -2202,6 +2511,16 @@ def get_shortcut_implementations(cls):
"html": {},
}
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Carousel(Component):
"""
@@ -2281,6 +2600,16 @@ def restore_flagged(self, dir, data, encryption_key):
for sample_set in json.loads(data)
]
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
class Chatbot(Component):
"""
@@ -2316,6 +2645,16 @@ def postprocess(self, y):
"""
return y
+ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("change", fn, inputs, outputs)
+
# Static Components
class Markdown(Component):
@@ -2327,6 +2666,16 @@ class Button(Component):
def __init__(self, default_value: str = "", *, label: str, **kwargs):
super().__init__(label=label, **kwargs)
+ def click(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ """
+ Parameters:
+ fn: Callable function
+ inputs: List of inputs
+ outputs: List of outputs
+ Returns: None
+ """
+ self.set_event_trigger("click", fn, inputs, outputs)
+
# TODO: (faruk) does this take component or interface as a input?
# see this line in Carousel
| [Blocks] Implement other event triggers besides `click` & support event triggers across multiple components
Currently, the only event trigger we support is `click`.
1. Can we add support for additional event triggers? Some of these will be component-specific, but something like an "on change" event would be useful for many different components. For example, in my Block below, I would like to run the function any time the input sliders change value:
```python
block = gr.Blocks()
with block:
gr.Markdown("Let's do some kinematics! Choose the speed and angle to see the trajectory.")
with gr.Row():
speed = gr.inputs.Slider(1, 30, default=25)
angle = gr.inputs.Slider(0, 90, default=45)
output = gr.outputs.Image(type="plot")
btn = gr.Button("Run")
btn.click(plot, [speed, angle], output)
block.launch()
```
2. It would be nice if I could easily assign a trigger to "if either of these two sliders changes" without having to write two separate event trigger lines.
3. It would be _really_ nice if I could just say something like "if anything changes in this Block" re-run all of the function(s). I would assume that many of our users want this functionality for their blocks. Could we have a parameter in `gr.Blocks()` like `live=True`. In other words, if you run `gr.Blocks(live=True)`, then you have an automatically responsive Block in which if any of the input components change, all of the functions are re-run and the outputs automatically refresh.
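A hedged sketch of how points 1 and 2 look with the per-component `change` event this PR adds; the `plot` stub below is only a stand-in so the snippet runs on its own:
```python
import numpy as np
import matplotlib.pyplot as plt
import gradio as gr


def plot(speed, angle):
    # stand-in for the trajectory function in the snippet above
    t = np.linspace(0, 2 * speed * np.sin(np.radians(angle)) / 9.8, 100)
    fig = plt.figure()
    plt.plot(speed * np.cos(np.radians(angle)) * t,
             speed * np.sin(np.radians(angle)) * t - 4.9 * t ** 2)
    return fig


block = gr.Blocks()
with block:
    speed = gr.inputs.Slider(1, 30, default=25)
    angle = gr.inputs.Slider(0, 90, default=45)
    output = gr.outputs.Image(type="plot")
    # point 1: re-run on any slider change, no explicit Run button
    speed.change(plot, [speed, angle], output)
    # point 2: today this still means one wiring line per input component
    angle.change(plot, [speed, angle], output)
block.launch()
```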
| Just came across this one when trying to add [Quickdraw](https://gradio.app/building_a_pictionary_app/) as a tab! +1 to this.
**Proposed Component Events**
| Component | Event | When? | Notes |
|---------------------|-----------|-----------------------------------|-------------------------------|
| **Audio** | `change` | audio source changes | should this be `on:data`? |
| | `edit` | audio is edited (trimmed) | |
| | `clear` | audio source is reset to nothing | |
| | `play` | audio is played | |
| | `pause` | audio is paused | |
| | `stop` | audio is stopped | |
| **Button** | `click` | button is clicked | |
| **Carousel** | `change` | current item changes | |
| **ChatBot** | `change` | chatbot posts a message | |
| **Checkbox** | `change` | checkbox is checked/ unchecked | |
| **CheckboxGroup** | `change` | checkbox is checked/ unchecked | |
| **Column** | | | static layout component (nyi) |
| **DataFrame** | `change` | data changes | should this be `on:data`? |
| **Dropdown** | `change` | selected option changes | |
| **File** | `change` | file data changes | should this be `on:data`? |
| | `clear` | video source is reset to no value | |
| **Grid** | | | static layout component (nyi) |
| **HighlightedText** | `change` | content is passed in | output only |
| **HTML** | `change` | content is passed in | output only |
| **Image** | `change` | image source changes | should this be `on:data`? |
| | `edit` | image is edited | |
| | `clear` | image source is reset to nothing | |
| **JSON** | `change` | content is passed in | output only |
| **KeyValue** | `change` | content is passed in | output only |
| **Label** | `change` | content is passed in | output only |
| **Number** | `change` | number value changes | |
| | `submit` | form is submitted | |
| **Radio** | `change` | selected option changes | |
| **Row** | | | static layout component |
| **Slider** | `change` | current value changes | |
| **Tabs** | `change` | selected tab changes | |
| **Textbox** | `change` | text value changes | |
| | `submit` | form is submitted | |
| **Timeseries** | `change` | chart data changes | should this `on:data`? |
| **Video** | `change` | video source changes | should this be `on:data`? |
| | `clear` | video source is reset to no value | |
| | `play` | video is played | |
| | `pause` | video is paused | |
| | `stop` | video is stopped | |
I think `change` is better than `data`, thinking about it. `data` kind of makes sense the first time something is loaded, but for edits it makes less sense.
@pngwn could you elaborate on this: how can the user reset the video source?
**video source is reset to no value**
Click a button and clear the value / set it to null. The reason for a separate event from `change` is that you typically won't want to run a prediction on a null value.
> I think change is better than data, thinking about it. data kind of makes sense the first time something is loaded, but for edits it makes less sense.
I find `change()` more intuitive as well
A few suggestions:
* For the `textbox`, can we support an event that detects if the user presses the "enter" button? We've heard from users directly and in issues (#681) that they want to submit the data by pressing enter
* I don't see `Image` here but I imagine it should support: `change()`, `clear()`, and perhaps `edit()` if the image is edited using the editor tools
* A couple of currently-output components are missing: [KeyValues](https://gradio.app/docs/#o_keyvalues), [HighlightedText](https://gradio.app/docs/#o_highlightedtext), which should support `change()`
* What about supporting a `.play()` event for `Audio` and `Video`?
* Nit: consider renaming `Chatbot.message()` to `Chatbot.change()` for consistency
- A `submit` event would be fine, I'll add that to `Number` and `Textbox`.
- Yeah I missed `Image`, I think it would be the same as Files, etc. `change` would cover edits but we can add the more specific `edit` event too.
- We can add all of the audio control events for `Audio` and `Video`. play/pause/stop. `Audio` will need `edit` as well (if we are adding it to Image)
- `HighlightedText` is in the table, `KeyValues` is not; I didn't know about that one.
- I'll change `Chatbot` from `message` to `change`.
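A quick usage sketch of the `submit` event agreed on above, as exposed on `Textbox` by the patch in this PR (the demo wiring itself is illustrative):
```python
import gradio as gr


def greet(name):
    return "Hello " + name + "!"


with gr.Blocks() as demo:
    name = gr.Textbox(label="name")
    greeting = gr.Textbox(label="greeting")
    # fires when the user presses Enter inside the textbox
    name.submit(greet, name, greeting)

demo.launch()
```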
We have a sketch component; would a drawing update be `change` as well?
@FarukOzderim yeah it would. Technically that is just an update to the image source as Sketch is a variant of the image component rather than a component in its own right.
This is great stuff, love it.
Shouldn't "submit" apply to chatbot too? When a user presses enter. And "change" if a user types a single letter?
No because the `Chatbot` component only displays messages like this:

(Doesn't include the input textbox)
Updated the table and implemented all of the events on the frontend in #807.
We can use the table above as a source of truth for syncing front and backend but also for documentation. We should just keep editing it as anything changes.
> Updated the table and implemented all of the events on the frontend in #807.
>
> We can use the table above as a source of truth for syncing front and backend but also for documentation. We should just keep editing it as anything changes.
Let's reference the changes if we edit the table in the future as it would make further updates easier. | 2022-03-21T14:35:22 |
|
gradio-app/gradio | 866 | gradio-app__gradio-866 | [
"790"
] | 2260c3fc3ed2ec1a2dc739eaa4f5c27c778fbf0e | diff --git a/demo/input-output/run.py b/demo/input-output/run.py
new file mode 100644
--- /dev/null
+++ b/demo/input-output/run.py
@@ -0,0 +1,16 @@
+import gradio as gr
+
+
+def image_mod(text):
+ return text[::-1]
+
+block = gr.Blocks()
+
+with block:
+ text = gr.Textbox()
+ btn = gr.Button("Run")
+ btn.click(image_mod, text, text)
+
+print(block.get_config_file())
+if __name__ == "__main__":
+ block.launch()
| diff --git a/ui/packages/app/test/input-output.spec.ts b/ui/packages/app/test/input-output.spec.ts
new file mode 100644
--- /dev/null
+++ b/ui/packages/app/test/input-output.spec.ts
@@ -0,0 +1,44 @@
+import { test, expect, Page } from "@playwright/test";
+
+function mock_demo(page: Page, demo: string) {
+ return page.route("http://localhost:7860/config", (route) => {
+ return route.fulfill({
+ headers: {
+ "Access-Control-Allow-Origin": "*"
+ },
+ path: `../../../demo/${demo}/config.json`
+ });
+ });
+}
+
+function mock_api(page: Page, body: Array<unknown>) {
+ return page.route("http://localhost:7860/api/predict/", (route) => {
+ const id = JSON.parse(route.request().postData()!).fn_index;
+ return route.fulfill({
+ headers: {
+ "Access-Control-Allow-Origin": "*"
+ },
+ body: JSON.stringify({
+ data: body[id]
+ })
+ });
+ });
+}
+
+test("a component acts as both input and output", async ({ page }) => {
+ await mock_demo(page, "input-output");
+ await mock_api(page, [["world hello"]]);
+ await page.goto("http://localhost:3000");
+
+ const textbox = await page.locator(".input-text");
+ const button = await page.locator("button");
+
+ await textbox.fill("hello world");
+
+ await Promise.all([
+ button.click(),
+ page.waitForResponse("http://localhost:7860/api/predict/")
+ ]);
+
+ await expect(await page.inputValue(".input-text")).toEqual("world hello");
+});
| Overwriting input image with output image?
Hi, how can we show the output image in the place of the input image? That is, after model inference, the input image would be overwritten with the resulting (output) image instead of showing them in parallel.
Thanks
This is a good suggestion and I think it would allow for some cool demos. @pngwn @FarukOzderim can we design Blocks so that if the same component is provided as the input and output, the result of the model inference would overwrite the input? E.g.
```python
im = components.Image()
btn.click(fn, im, im) # the output of fn overwrites the image used for the input
```
I think we could also build a nice higher-level abstraction `UnifiedInterface` which is designed for these use cases.
I think this will be possible when components are merged into one.
Yeah, I think this should just work with the blocks changes, but don't quote me on that. Definitely an interesting use case and something we will want to capture in our tests.
Added it to test workflows! | 2022-03-24T11:00:32 |
gradio-app/gradio | 929 | gradio-app__gradio-929 | [
"881"
] | 03838573c9716ff9eec3fee1ba1cc0fc69849f4b | diff --git a/demo/blocks_inputs/run.py b/demo/blocks_inputs/run.py
new file mode 100644
--- /dev/null
+++ b/demo/blocks_inputs/run.py
@@ -0,0 +1,20 @@
+import gradio as gr
+
+str = """Hello friends
+hello friends
+
+Hello friends
+
+"""
+
+
+
+with gr.Blocks() as demo:
+ txt = gr.Textbox(label="Input", lines=5)
+ txt_2 = gr.Textbox(label="Output")
+ txt_3 = gr.Textbox(str, label="Output")
+ btn = gr.Button('Submit')
+ btn.click(lambda a : a, inputs=[txt], outputs=[txt_2])
+
+if __name__ == "__main__":
+ demo.launch()
| [`blocks-dev`] Default value of Checkbox does not show up
| Is this a checklist to mark when a component is tested as working without an issue?
That's honestly a good idea. It would probably subsume many of the individual issues that I created and be easier to manage. Feel free to create it!
Not sure if this fits into this issue or a separate issue, but I noticed that the `default_value` for a `Checkbox` doesn't show up. (It does for `CheckboxGroup`, `Radio`, etc.). Try:
```
bl = gr.Blocks()
with bl:
gr.Checkbox("vegetarian")
bl.launch()
```
I don't think this issue has been that useful tbh. Renaming to only focus on the last thing that I mentioned.
@pngwn I think there are still some issues with Checkbox:
<img width="729" alt="image" src="https://user-images.githubusercontent.com/1778297/161813673-1ac69bc6-3d98-4ae7-9c53-2c16f0cc78fd.png">
* The `label` gets assigned to both the label and the associated text. The correct behavior is that the `default_value` parameter should be the associated text
* The Checkbox seems to pass in the default_value. I think the correct behavior is that it should pass in a boolean: True or False, depending on it's checked.
Compare with CheckboxGroup, which is behaving correctly:
<img width="759" alt="image" src="https://user-images.githubusercontent.com/1778297/161813912-3daee717-46b5-4201-9ada-09b883891175.png">
| 2022-04-06T13:48:59 |
|
gradio-app/gradio | 947 | gradio-app__gradio-947 | [
"705"
] | 3c876c49efa05513e7e77cd597ca70f1575b284f | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -158,16 +158,30 @@ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]
class Blocks(Launchable, BlockContext):
- def __init__(self, theme="default"):
+ def __init__(
+ self,
+ theme: str = "default",
+ analytics_enabled: Optional[bool] = None,
+ mode: str = "blocks",
+ ):
+
# Cleanup shared parameters with Interface
self.save_to = None
self.ip_address = utils.get_local_ip_address()
self.api_mode = False
- self.analytics_enabled = True
self.theme = theme
self.requires_permissions = False # TODO: needs to be implemented
self.enable_queue = False
self.is_space = True if os.getenv("SYSTEM") == "spaces" else False
+ self.mode = mode
+
+ # For analytics_enabled and allow_flagging: (1) first check for
+ # parameter, (2) check for env variable, (3) default to True/"manual"
+ self.analytics_enabled = (
+ analytics_enabled
+ if analytics_enabled is not None
+ else os.getenv("GRADIO_ANALYTICS_ENABLED", "True") == "True"
+ )
super().__init__()
self.blocks = {}
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -166,7 +166,7 @@ def __init__(
server_name (str): DEPRECATED. Name of the server to use for serving the interface - pass in launch() instead.
server_port (int): DEPRECATED. Port of the server to use for serving the interface - pass in launch() instead.
"""
- super().__init__()
+ super().__init__(analytics_enabled=analytics_enabled, mode="interface")
if not isinstance(fn, list):
fn = [fn]
@@ -394,7 +394,6 @@ def clean_html(raw_html):
self.share = None
self.share_url = None
self.local_url = None
- self.ip_address = utils.get_local_ip_address()
if show_tips is not None:
warnings.warn(
diff --git a/gradio/launchable.py b/gradio/launchable.py
--- a/gradio/launchable.py
+++ b/gradio/launchable.py
@@ -197,13 +197,14 @@ def launch(
"is_google_colab": is_colab,
"is_sharing_on": share,
"share_url": share_url,
- "ip_address": self.ip_address,
+ "ip_address": self.ip_address if hasattr(self, "ip_address") else "",
"enable_queue": self.enable_queue,
"show_tips": self.show_tips,
- "api_mode": self.api_mode,
+ "api_mode": self.api_mode if hasattr(self, "api_mode") else "",
"server_name": server_name,
"server_port": server_port,
- "is_spaces": self.is_space,
+ "is_spaces": self.is_space if hasattr(self, "is_space") else "",
+ "mode": self.mode if hasattr(self, "mode") else "",
}
if self.analytics_enabled:
utils.launch_analytics(data)
| [Blocks] Blocks-specifics Analytics
Our segment analytics should differentiate based on what kind of `Launchable()` is launched: `Block` vs. `Interface` vs. others in the future.
In addition, are there other, Block-specific analytics we want to measure?
* What kind of components are being used?
* What kind of static components are being used?
* More error logging? I anticipate that a lot more errors will happen with Blocks compared to regular Interfaces.
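As a loose illustration of the first bullet: the launch-time payload could carry a launch mode plus a summary of the components used. The key names below are placeholders, not a finalized schema; the patch in this PR itself only adds a `mode` field (and an `is_spaces` flag) to the launch payload.
```python
def launch_payload(launchable) -> dict:
    # hypothetical extra analytics fields for a Blocks launch
    return {
        "mode": getattr(launchable, "mode", ""),  # "blocks" vs. "interface"
        "component_types": sorted(
            {type(block).__name__ for block in getattr(launchable, "blocks", {}).values()}
        ),
    }
```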
| 2022-04-07T21:19:03 |
||
gradio-app/gradio | 981 | gradio-app__gradio-981 | [
"921"
] | ceea8ce3ca05d6e78cd471eca9dbf89f08da77bf | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2033,23 +2033,21 @@ def __init__(
self.col_width = col_width
self.type = type
self.output_type = "auto"
- self.default_value = (
- default_value
- if default_value is not None
- else [[None for _ in range(self.col_count)] for _ in range(self.row_count)]
- )
- sample_values = {
- "str": "abc",
- "number": 786,
- "bool": True,
- "date": "02/08/1993",
+ default_values = {
+ "str": "",
+ "number": 0,
+ "bool": False,
+ "date": "01/01/1970",
}
column_dtypes = (
[datatype] * self.col_count if isinstance(datatype, str) else datatype
)
self.test_input = [
- [sample_values[c] for c in column_dtypes] for _ in range(row_count)
+ [default_values[c] for c in column_dtypes] for _ in range(row_count)
]
+ self.default_value = (
+ default_value if default_value is not None else self.test_input
+ )
self.max_rows = max_rows
self.max_cols = max_cols
self.overflow_row_behaviour = overflow_row_behaviour
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -832,9 +832,9 @@ def test_component_functions(self):
"col_count": 3,
"col_width": None,
"default_value": [
- [None, None, None],
- [None, None, None],
- [None, None, None],
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
],
"name": "dataframe",
"label": "Dataframe Input",
@@ -882,9 +882,9 @@ def test_component_functions(self):
"col_count": 3,
"col_width": None,
"default_value": [
- [None, None, None],
- [None, None, None],
- [None, None, None],
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
],
},
)
diff --git a/test/test_inputs.py b/test/test_inputs.py
--- a/test/test_inputs.py
+++ b/test/test_inputs.py
@@ -594,9 +594,9 @@ def test_as_component(self):
"col_count": 3,
"col_width": None,
"default_value": [
- [None, None, None],
- [None, None, None],
- [None, None, None],
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
],
"name": "dataframe",
"label": "Dataframe Input",
diff --git a/test/test_outputs.py b/test/test_outputs.py
--- a/test/test_outputs.py
+++ b/test/test_outputs.py
@@ -369,9 +369,9 @@ def test_as_component(self):
"col_count": 3,
"col_width": None,
"default_value": [
- [None, None, None],
- [None, None, None],
- [None, None, None],
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
],
"name": "dataframe",
},
diff --git a/ui/packages/app/test/blocks-basic.spec.ts b/ui/packages/app/test/blocks-basic.spec.ts
--- a/ui/packages/app/test/blocks-basic.spec.ts
+++ b/ui/packages/app/test/blocks-basic.spec.ts
@@ -26,7 +26,7 @@ function mock_api(page: Page, body: Array<unknown>) {
}
test("renders the correct elements", async ({ page }) => {
- await mock_demo(page, "xray_blocks");
+ await mock_demo(page, "blocks_xray");
await page.goto("http://localhost:3000");
const description = await page.locator(".output-markdown");
@@ -40,7 +40,7 @@ test("renders the correct elements", async ({ page }) => {
});
test("can run an api request and display the data", async ({ page }) => {
- await mock_demo(page, "xray_blocks");
+ await mock_demo(page, "blocks_xray");
await mock_api(page, [
[
{
@@ -58,9 +58,6 @@ test("can run an api request and display the data", async ({ page }) => {
await page.goto("http://localhost:3000");
- // await page.locator('button:has-text("Covid")').click();
- // await page.locator('button:has-text("Lung Cancer")').click();
-
await page.check("label:has-text('Covid')");
await page.check("label:has-text('Lung Cancer')");
| Dataframe input does not refresh as intended
### Describe the bug
When using a dataframe as input, the Gradio app output is locked to the input received when run for the first time. Subsequent user updates to the dataframe are not reflected in the Gradio app output. In addition, when "Clear" is selected to clear the outputs, the Gradio app behaves as if an empty dataframe is passed. The app has to be relaunched to have new input processed correctly.
Reporting both instances in the same issue as I suspect they both have to do with how Gradio handles dataframe input under the hood.
### Reproduction
Both of these are reproducible using the `filter_records.ipynb` demo at https://colab.research.google.com/drive/1mSrwzeNRMQc0FBiUY9Yx_rwqyX1X-7Cf#scrollTo=RN-8fjhplcnX. Code reproduced below for convenience:
```
import gradio as gr
def filter_records(records, gender):
return records[records["gender"] == gender]
iface = gr.Interface(
filter_records,
[
gr.inputs.Dataframe(
headers=["name", "age", "gender"],
datatype=["str", "number", "str"],
row_count=5,
),
gr.inputs.Dropdown(["M", "F", "O"]),
],
"dataframe",
description="Enter gender as 'M', 'F', or 'O' for other.",
)
iface.test_launch()
iface.launch()
```
Including reproduction steps in the screenshot section below.
### Screenshot
1. Launch the app, key in the following table, and run the app. The output will correctly filter to show only the first row.
| name | age | gender |
|--------|-----|----------|
| A | 1 | M |
| B | 1 | F |
| C | 1 | O |

2. Change `gender` in rows 2 and 3 to "M", and submit the new input. All rows should be displayed as expected output, but the output did not change.

3. Select "Clear" then "Submit". The output is now blank, as if no rows are given as input.

### Logs
```shell
No logs generated
```
### System Info
```shell
Gradio version: 2.9.1, running on Google Colab
OS: MS Windows
Browser: Edge
```
### Severity
blocker
| Same problem: if I change the data and don't click the Clear button, the dataframe will not refresh. This happens both when I change the data manually and when I use `Examples` as input; but when I click the `Clear` button and then `Submit`, the output does refresh.
My app's input is an image, but yours is a dataframe. When I click the `Clear` button it doesn't seem to work at all, so I think the problem is in the dataframe input part.
Thanks for letting us know @tnwei and @Olvi73, we are working on fixing all of the issues with the `Dataframe` component.
cc @pngwn | 2022-04-12T14:11:26 |
gradio-app/gradio | 1,038 | gradio-app__gradio-1038 | [
"838"
] | db481311a68f09ed06731f896d2b24a5dde17bff | diff --git a/demo/no_input/run.py b/demo/no_input/run.py
new file mode 100644
--- /dev/null
+++ b/demo/no_input/run.py
@@ -0,0 +1,16 @@
+import gradio as gr
+import random
+
+sentence_list = [
+ "Good morning!",
+ "Prayers are with you, have a safe day!",
+ "I love you!"
+]
+
+
+def random_sentence():
+ return sentence_list[random.randint(0, 2)]
+
+
+demo = gr.Interface(fn=random_sentence, outputs="text")
+demo.launch()
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -172,6 +172,11 @@ def __init__(
"""
super().__init__(analytics_enabled=analytics_enabled, mode="interface")
+ if inputs is None:
+ inputs = []
+ if outputs is None:
+ outputs = []
+
if not isinstance(fn, list):
fn = [fn]
if not isinstance(inputs, list):
| Allow input-less and output-less interfaces
**Is your feature request related to a problem? Please describe.**
Sometimes, I want to create an interface without an input (such as for GANs) or without outputs (for crowd-sourced dataset generation). Internal Slack discussion https://huggingface.slack.com/archives/C02QZLG8GMN/p1647601809063159
**Describe the solution you'd like**
Allowing something like `gr.Interface(fn=fn, outputs="image")`
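For illustration, a minimal sketch of the other direction requested here (an output-less interface for data collection); the function and file name are hypothetical and assume the feature behaves as proposed, while the input-less case is covered by the `no_input` demo added in the patch above:
```python
import gradio as gr

# Hypothetical output-less interface for crowd-sourced data collection;
# assumes `outputs` can simply be omitted once this feature lands.
def save_submission(text):
    # Illustrative storage only: append each submission to a local file.
    with open("submissions.txt", "a") as f:
        f.write(text + "\n")

demo = gr.Interface(fn=save_submission, inputs="text", outputs=None)
demo.launch()
```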
| 2022-04-20T11:23:44 |
||
gradio-app/gradio | 1,077 | gradio-app__gradio-1077 | [
"1055"
] | e86739bd508daaca09ee66b974f459b6e070809a | diff --git a/demo/kitchen_sink/run.py b/demo/kitchen_sink/run.py
--- a/demo/kitchen_sink/run.py
+++ b/demo/kitchen_sink/run.py
@@ -1,3 +1,4 @@
+import os
import json
import numpy as np
@@ -40,9 +41,9 @@ def fn(
}, # Label
(audio1[0], np.flipud(audio1[1]))
if audio1 is not None
- else "files/cantina.wav", # Audio
- np.flipud(im1) if im1 is not None else "files/cheetah1.jpg", # Image
- video if video is not None else "files/world.mp4", # Video
+ else os.path.join(os.path.dirname(__file__),"files/cantina.wav"), # Audio
+ np.flipud(im1) if im1 is not None else os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"), # Image
+ video if video is not None else os.path.join(os.path.dirname(__file__),"files/world.mp4"), # Video
[
("The", "art"),
("quick brown", "adj"),
@@ -72,11 +73,11 @@ def fn(
"<button style='background-color: red'>Click Me: "
+ radio
+ "</button>", # HTML
- "files/titanic.csv",
+ os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
df1, # Dataframe
np.random.randint(0, 10, (4, 4)), # Dataframe
[
- im for im in [im1, im2, im3, im4, "files/cheetah1.jpg"] if im is not None
+ im for im in [im1, im2, im3, im4, os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg")] if im is not None
], # Carousel
df2, # Timeseries
)
@@ -136,16 +137,16 @@ def fn(
["foo", "baz"],
"baz",
"bar",
- "files/cheetah1.jpg",
- "files/cheetah1.jpg",
- "files/cheetah1.jpg",
- "files/cheetah1.jpg",
- "files/world.mp4",
- "files/cantina.wav",
- "files/cantina.wav",
- "files/titanic.csv",
+ os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__),"files/world.mp4"),
+ os.path.join(os.path.dirname(__file__),"files/cantina.wav"),
+ os.path.join(os.path.dirname(__file__),"files/cantina.wav"),
+ os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
[[1, 2, 3], [3, 4, 5]],
- "files/time.csv",
+ os.path.join(os.path.dirname(__file__),"files/time.csv"),
]
]
* 3,
diff --git a/demo/main_note/run.py b/demo/main_note/run.py
--- a/demo/main_note/run.py
+++ b/demo/main_note/run.py
@@ -1,6 +1,6 @@
from math import log2, pow
+import os
-import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft
@@ -46,8 +46,8 @@ def main_note(audio):
gr.Audio(source="microphone"),
gr.Label(num_top_classes=4),
examples=[
- ["audio/recording1.wav"],
- ["audio/cantina.wav"],
+ [os.path.join(os.path.dirname(__file__),"audio/recording1.wav")],
+ [os.path.join(os.path.dirname(__file__),"audio/cantina.wav")],
],
interpretation="default",
)
diff --git a/demo/zip_two_files/run.py b/demo/zip_two_files/run.py
--- a/demo/zip_two_files/run.py
+++ b/demo/zip_two_files/run.py
@@ -1,3 +1,4 @@
+import os
from zipfile import ZipFile
import gradio as gr
@@ -15,7 +16,8 @@ def zip_two_files(file1, file2):
["file", "file"],
"file",
examples=[
- ["files/titanic.csv", "files/titanic.csv"],
+ [os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
+ os.path.join(os.path.dirname(__file__),"files/titanic.csv")],
],
)
| diff --git a/ui/packages/app/src/components/Carousel/Carousel.test.svelte b/ui/packages/app/src/components/Carousel/Carousel.test.svelte
--- a/ui/packages/app/src/components/Carousel/Carousel.test.svelte
+++ b/ui/packages/app/src/components/Carousel/Carousel.test.svelte
@@ -4,7 +4,7 @@
import api_logo from "../../../public/static/img/api-logo.svg";
</script>
-<Carousel on:change>
+<Carousel on:change loading_status="complete">
<CarouselItem>
<h1>Item 1</h1>
</CarouselItem>
diff --git a/ui/packages/app/src/components/utils/helpers.test.ts b/ui/packages/app/src/components/utils/helpers.test.ts
--- a/ui/packages/app/src/components/utils/helpers.test.ts
+++ b/ui/packages/app/src/components/utils/helpers.test.ts
@@ -74,7 +74,7 @@ describe("randInt", () => {
test("respects min and max when negative", () => {
const n = randInt(-100, -10);
- assert.ok(n > -100 && n < -10);
+ assert.ok(n >= -100 && n <= -10);
});
});
diff --git a/ui/packages/app/test/blocks-basic.spec.ts b/ui/packages/app/test/blocks-basic.spec.ts
--- a/ui/packages/app/test/blocks-basic.spec.ts
+++ b/ui/packages/app/test/blocks-basic.spec.ts
@@ -68,6 +68,6 @@ test("can run an api request and display the data", async ({ page }) => {
page.waitForResponse("**/api/predict/")
]);
- const json = await page.locator(".output-json");
+ const json = await page.locator("data-testid=json");
await expect(json).toContainText(`Covid: 0.75, Lung Cancer: 0.25`);
});
| Block wrapper handling
~~Two~~Three main points here:
- [x] We are wrapping _almost_ every component with `Block` inside the component itself. This is okay but there is a lot of repetition. We might be able to do this at the app level, when we render the components from the config. This would simplify the components a little but would require a little refactoring. One to investigate rather than implement blindly.
- [x] `Radio` and `CheckboxGroup` use a `fieldset` element as their wrapper rather than the `Block` component, because they are a collection of form elements that exist together. This results in some duplication of code/classes, however small, to get consistent styling. We could pass a prop to `Blocks` and use `svelte:element` to switch between a `div` and a `fieldset`.
- [ ] Related to the final point, the form components that use a fieldset should use a `legend` element for their 'title'/'label' for accessibility reasons.
| 2022-04-25T10:53:13 |
|
gradio-app/gradio | 1,105 | gradio-app__gradio-1105 | [
"946"
] | 90f3aec388925ab867b4a6609660dd39cac13490 | diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py
--- a/demo/blocks_outputs/run.py
+++ b/demo/blocks_outputs/run.py
@@ -1,27 +1,35 @@
import gradio as gr
with gr.Blocks() as demo:
- txt = gr.Textbox(label="Small Textbox", lines=1)
- txt = gr.Textbox(label="Large Textbox", lines=5)
- num = gr.Number(label="Number")
- check = gr.Checkbox(label="Checkbox")
- check_g = gr.CheckboxGroup(label="Checkbox Group", choices=["One", "Two", "Three"])
- radio = gr.Radio(label="Radio", choices=["One", "Two", "Three"])
- drop = gr.Dropdown(label="Dropdown", choices=["One", "Two", "Three"])
- slider = gr.Slider(label="Slider")
- audio = gr.Audio()
- video = gr.Video()
- image = gr.Image()
- ts = gr.Timeseries()
- df = gr.Dataframe()
- html = gr.HTML()
- json = gr.JSON()
- md = gr.Markdown()
- label = gr.Label()
- highlight = gr.HighlightedText()
- # layout components are static only
- # carousel doesn't work like other components
- # carousel = gr.Carousel()
+ with gr.Column():
+ txt = gr.Textbox(label="Small Textbox", lines=1, show_label=False)
+ txt = gr.Textbox(label="Large Textbox", lines=5, show_label=False)
+ num = gr.Number(label="Number", show_label=False)
+ check = gr.Checkbox(label="Checkbox", show_label=False)
+ check_g = gr.CheckboxGroup(
+ label="Checkbox Group", choices=["One", "Two", "Three"], show_label=False
+ )
+ radio = gr.Radio(
+ label="Radio", choices=["One", "Two", "Three"], show_label=False
+ )
+ drop = gr.Dropdown(
+ label="Dropdown", choices=["One", "Two", "Three"], show_label=False
+ )
+ slider = gr.Slider(label="Slider", show_label=False)
+ audio = gr.Audio(show_label=False)
+ file = gr.File(show_label=False)
+ video = gr.Video(show_label=False)
+ image = gr.Image(show_label=False)
+ ts = gr.Timeseries(show_label=False)
+ df = gr.Dataframe(show_label=False)
+ html = gr.HTML(show_label=False)
+ json = gr.JSON(show_label=False)
+ md = gr.Markdown(show_label=False)
+ label = gr.Label(show_label=False)
+ highlight = gr.HighlightedText(show_label=False)
+ # layout components are static only
+ # carousel doesn't work like other components
+ # carousel = gr.Carousel()
if __name__ == "__main__":
| [blocks] optional labels for certain inputs
For most form elements, labels are required for accessibility reasons but for _outputs_ the case is less clear. They certainly need useful descriptions but since they are not interactive form elements, the accessibility considerations are more around readability.
We need to figure out how/where we can make certain labels optional without compromising end user usability or accessibility.
Continuation of #704.
| @pngwn I was looking into this and it looks like labels need to be provided for accessibility reasons, but they don't necessarily need to be _visible_:
> A label for a form control helps everyone better understand its purpose. In some cases, the purpose may be clear enough from the context when the content is rendered visually. The label can be hidden visually, though it still needs to be provided within the code to support other forms of presentation and interaction, such as for screen reader and speech input users. The method used in this tutorial to hide an element visually but make it available for assistive technologies is explained in [Note on hiding element](https://www.w3.org/WAI/tutorials/forms/labels/#note-on-hiding-elements).
Source: https://www.w3.org/WAI/tutorials/forms/labels/
I think it would be nice if people can make the labels invisible because sometimes they clutter up the UI or look out of place. What do you think about adding a "hide_label" boolean parameter to each of the form elements? False by default, but can be set to True, which makes the label invisible.
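For concreteness, a minimal sketch of the idea; it uses the `show_label` parameter that appears in the demo patch at the top of this record (the inverse of the `hide_label` flag suggested here), and assumes the label is hidden only visually:
```python
import gradio as gr

# Sketch: each component keeps a meaningful label in its config,
# but show_label=False stops it from being rendered, reducing clutter.
with gr.Blocks() as demo:
    query = gr.Textbox(label="Search query", show_label=False)
    result = gr.Label(label="Result", show_label=False)

demo.launch()
```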
Works for me, tailwind has a class for that: https://tailwindcss.com/docs/screen-readers | 2022-04-27T10:23:35 |
|
gradio-app/gradio | 1,146 | gradio-app__gradio-1146 | [
"999"
] | 87dcdd713453f34d86896d82400652682411bfe4 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -158,15 +158,23 @@ def get_template_context(self):
class Column(BlockContext):
- def __init__(self, visible: bool = True, css: Optional[Dict[str, str]] = None):
+ def __init__(
+ self,
+ visible: bool = True,
+ css: Optional[Dict[str, str]] = None,
+ variant: str = "default",
+ ):
"""
css: Css rules to apply to block.
+ variant: column type, 'default' (no background) or 'panel' (gray background color and rounded corners)
"""
+ self.variant = variant
super().__init__(visible, css)
def get_template_context(self):
return {
"type": "column",
+ "variant": self.variant,
**super().get_template_context(),
}
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -468,13 +468,7 @@ def clean_html(raw_html):
self.InterfaceTypes.INPUT_ONLY,
self.InterfaceTypes.UNIFIED,
]:
- with Column(
- css={
- "background-color": "rgb(249,250,251)",
- "padding": "0.5rem",
- "border-radius": "0.5rem",
- }
- ):
+ with Column(variant="panel"):
input_component_column = Column()
if self.interface_type in [
self.InterfaceTypes.INPUT_ONLY,
@@ -516,13 +510,7 @@ def clean_html(raw_html):
self.InterfaceTypes.OUTPUT_ONLY,
]:
- with Column(
- css={
- "background-color": "rgb(249,250,251)",
- "padding": "0.5rem",
- "border-radius": "0.5rem",
- }
- ):
+ with Column(variant="panel"):
status_tracker = StatusTracker(cover_container=True)
for component in self.output_components:
component.render()
| handle custom styling for interfaces in the frontend rather than in the python lib
At the minute we are injecting custom CSS to create the interface design. Ideally we would not need to do this and would use either some new layout components (`Panel`, etc.) or custom CSS properties to modify the components we are using.
The current approach makes the dev experience very tedious when modifying the interface styling.
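For reference, a minimal sketch of the `variant` approach introduced in the patch above, where the panel styling is selected by name and implemented in the frontend rather than injected as CSS rules:
```python
import gradio as gr

# Sketch: the gray, rounded "panel" look comes from variant="panel",
# replacing the background-color/padding/border-radius CSS rules that
# the patch removes from Interface.
with gr.Blocks() as demo:
    with gr.Column(variant="panel"):
        name = gr.Textbox(label="Name")
        greet = gr.Button("Greet")

demo.launch()
```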
| 2022-05-02T10:48:23 |
||
gradio-app/gradio | 1,149 | gradio-app__gradio-1149 | [
"1141"
] | b0306e716da49895af7777d789c7ee0263658964 | diff --git a/demo/kitchen_sink/run.py b/demo/kitchen_sink/run.py
--- a/demo/kitchen_sink/run.py
+++ b/demo/kitchen_sink/run.py
@@ -41,9 +41,13 @@ def fn(
}, # Label
(audio1[0], np.flipud(audio1[1]))
if audio1 is not None
- else os.path.join(os.path.dirname(__file__),"files/cantina.wav"), # Audio
- np.flipud(im1) if im1 is not None else os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"), # Image
- video if video is not None else os.path.join(os.path.dirname(__file__),"files/world.mp4"), # Video
+ else os.path.join(os.path.dirname(__file__), "files/cantina.wav"), # Audio
+ np.flipud(im1)
+ if im1 is not None
+ else os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), # Image
+ video
+ if video is not None
+ else os.path.join(os.path.dirname(__file__), "files/world.mp4"), # Video
[
("The", "art"),
("quick brown", "adj"),
@@ -73,11 +77,19 @@ def fn(
"<button style='background-color: red'>Click Me: "
+ radio
+ "</button>", # HTML
- os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
+ os.path.join(os.path.dirname(__file__), "files/titanic.csv"),
df1, # Dataframe
np.random.randint(0, 10, (4, 4)), # Dataframe
[
- im for im in [im1, im2, im3, im4, os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg")] if im is not None
+ im
+ for im in [
+ im1,
+ im2,
+ im3,
+ im4,
+ os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
+ ]
+ if im is not None
], # Carousel
df2, # Timeseries
)
@@ -106,7 +118,7 @@ def fn(
gr.Audio(label="Microphone", source="microphone"),
gr.File(label="File"),
gr.Dataframe(label="Dataframe", headers=["Name", "Age", "Gender"]),
- gr.Timeseries(x="time", y=["price", "value"]),
+ gr.Timeseries(x="time", y=["price", "value"], colors=["pink", "purple"]),
],
outputs=[
gr.Textbox(label="Textbox"),
@@ -137,16 +149,16 @@ def fn(
["foo", "baz"],
"baz",
"bar",
- os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
- os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
- os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
- os.path.join(os.path.dirname(__file__),"files/cheetah1.jpg"),
- os.path.join(os.path.dirname(__file__),"files/world.mp4"),
- os.path.join(os.path.dirname(__file__),"files/cantina.wav"),
- os.path.join(os.path.dirname(__file__),"files/cantina.wav"),
- os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
+ os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
+ os.path.join(os.path.dirname(__file__), "files/world.mp4"),
+ os.path.join(os.path.dirname(__file__), "files/cantina.wav"),
+ os.path.join(os.path.dirname(__file__), "files/cantina.wav"),
+ os.path.join(os.path.dirname(__file__), "files/titanic.csv"),
[[1, 2, 3], [3, 4, 5]],
- os.path.join(os.path.dirname(__file__),"files/time.csv"),
+ os.path.join(os.path.dirname(__file__), "files/time.csv"),
]
]
* 3,
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2242,6 +2242,7 @@ def __init__(
y: str | List[str] = None,
label: Optional[str] = None,
css: Optional[Dict] = None,
+ colors: List[str] = None,
**kwargs,
):
"""
@@ -2250,6 +2251,7 @@ def __init__(
x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series.
y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.
label (str): component name in interface.
+ colors List[str]: an ordered list of colors to use for each line plot
"""
self.default_value = (
pd.read_csv(default_value) if default_value is not None else None
@@ -2258,6 +2260,7 @@ def __init__(
if isinstance(y, str):
y = [y]
self.y = y
+ self.colors = colors
super().__init__(label=label, css=css, **kwargs)
def get_template_context(self):
@@ -2265,6 +2268,7 @@ def get_template_context(self):
"x": self.x,
"y": self.y,
"default_value": self.default_value,
+ "colors": self.colors,
**super().get_template_context(),
}
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1080,6 +1080,7 @@ def test_component_functions(self):
"name": "timeseries",
"show_label": True,
"label": "Upload Your Timeseries",
+ "colors": None,
"css": {},
"default_value": None,
"interactive": None,
@@ -1101,6 +1102,7 @@ def test_component_functions(self):
"name": "timeseries",
"show_label": True,
"label": "Disease",
+ "colors": None,
"css": {},
"default_value": None,
"interactive": None,
| Time series with decimal numbers
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
It seems that the time series component currently only works with integer values.
**Describe the solution you'd like**
Add support for decimal values.
**Additional context**
| Do you have some example data or a simple repro that I could test this with? | 2022-05-03T12:12:43 |
gradio-app/gradio | 1,153 | gradio-app__gradio-1153 | [
"980"
] | b0306e716da49895af7777d789c7ee0263658964 | diff --git a/demo/gpt_j/run.py b/demo/gpt_j/run.py
--- a/demo/gpt_j/run.py
+++ b/demo/gpt_j/run.py
@@ -10,7 +10,7 @@
demo = gr.Interface.load(
"huggingface/EleutherAI/gpt-j-6B",
- inputs=gr.Textbox(lines=5, label="Input Text"),
+ inputs=gr.Textbox(lines=5, max_lines=6, label="Input Text"),
title=title,
examples=examples,
)
| diff --git a/ui/packages/app/src/components/Textbox/Textbox.test.ts b/ui/packages/app/src/components/Textbox/Textbox.test.ts
--- a/ui/packages/app/src/components/Textbox/Textbox.test.ts
+++ b/ui/packages/app/src/components/Textbox/Textbox.test.ts
@@ -9,26 +9,26 @@ describe("Textbox", () => {
afterEach(() => cleanup());
test("renders provided value", () => {
- const { container, getByLabelText } = render(Textbox, {
+ const { getByDisplayValue } = render(Textbox, {
lines: 1,
mode: "dynamic",
value: "hello world",
label: "Textbox"
});
- const item: HTMLInputElement = getByLabelText("Textbox");
+ const item: HTMLInputElement = getByDisplayValue("hello world");
assert.equal(item.value, "hello world");
});
test("changing the text should update the value", async () => {
- const { component, getByLabelText } = render(Textbox, {
+ const { component, getByLabelText, getByDisplayValue } = render(Textbox, {
lines: 1,
mode: "dynamic",
- value: "",
+ value: "hi ",
label: "Textbox"
});
- const item: HTMLInputElement = getByLabelText("Textbox");
+ const item: HTMLInputElement = getByDisplayValue("hi");
const mock = spy();
component.$on("change", mock);
@@ -39,22 +39,9 @@ describe("Textbox", () => {
// wait for debounce
await wait(300);
- assert.equal(item.value, "some text");
- assert.equal(component.value, "some text");
+ assert.equal(item.value, "hi some text");
+ assert.equal(component.value, "hi some text");
assert.equal(mock.callCount, 1);
- assert.equal(mock.calls[0][0].detail, "some text");
- });
-
- test("component should respect placeholder", async () => {
- const { getByLabelText } = render(Textbox, {
- lines: 1,
- mode: "dynamic",
- value: "",
- placeholder: "placeholder text",
- label: "Textbox"
- });
-
- const item: HTMLInputElement = getByLabelText("Textbox");
- assert.equal(item.placeholder, "placeholder text");
+ assert.equal(mock.calls[0][0].detail, "hi some text");
});
});
| Textbox Autoheight Logic
Continuation of halted discussion at #929
I propose having `lines=1` by default and always applying autoheight logic to the `textbox` component so that it increases its size with the incoming input. What are your thoughts @abidlabs @aliabid94 @pngwn?
```
@abidlabs @pngwn I think having the input textbox lines change dynamically would be great as well. It would make it much less cumbersome, WDYT?
Guess it depends how picky users are about how the component is laid out. We could use the autoheight logic when a lines value isn't provided?
Yeah I thought the same thing at first, then later thought: what about respecting the line size initially and letting it increase when the input does not fit?
Btw the default lines value is 1 currently, but we could change it.
```
| I don't have a strong opinion on this. I can see how increasing the input textbox height can be useful in some cases, but in other cases (imagine a large text that is copy-pasted), then it could lead to the textbox becoming very large and crowding out the other components.
> I don't have a strong opinion on this. I can see how increasing the input textbox height can be useful in some cases, but in other cases (imagine a large text that is copy-pasted), then it could lead to the textbox becoming very large and crowding out the other components.
Is large text a use case though?
If it is, then we shouldn't enable it by default, or we should make it possible to turn off via a parameter.
Then how does this sound?
- If lines is not provided we make it autoheight
- If autoheight is True, we make it autoheight
- otherwise autoHeight is False
We could also set a max pretty easily.
Then what about taking
- lines as min
- max_lines as max
- autoheight is enabled by default, only disabled when it is `autoheight=False`
I assume that should be `autoheight=False` above.
Otherwise, seems reasonable.
I think we can wait until after 3.0 as this doesn't seem to be urgent.
I would like to include it in 3.0 if it is a small amount of work from the frontend perspective. Otherwise it can wait at low priority.
Frontend logic is already there really (for outputs), it just needs hooking up to whatever param we want to use to control it. I think there should be a default max_height/max_lines, otherwise weird stuff might happen. Probably around 10.
Ok then, I will update accordingly.
Sorry one quick thing:
If we have a `lines` parameter and a `max_lines` parameter, do we need `autoheight`? We can just allow a user to block auto-adjusting by setting `max_lines` equal to `lines`? We can cut down on one unnecessary parameter and prevent parameter settings from clashing.
Agree with @abidlabs, let's use `lines` as max_lines.
Proposal:
autoheight is enabled by default:
- when autoheight is True, min_lines is 1 and max_lines is `lines`
- when autoheight is False, min_lines = max_lines = `lines`
How does this sound @abidlabs and @pngwn?
Actually, I wasn't suggesting using `lines` as `max_lines`. I was saying to drop the `autoheight` parameter, because that can be inferred from the value of `lines` and `max_lines`.
* We should use `lines` as the number of lines that are shown by default (aka the minimum height), because that's the current behavior and we don't want to break that
* We can add `max_lines` to mean the biggest that the `Textbox` can become. By default, this could be equal to `None`, which means that the textbox can become arbitrarily large. If the user does not want resizing, they can simply set this equal to `lines`
Makes sense, I support this, what about you @pngwn?
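For concreteness, a minimal sketch of how that proposal reads in user code, matching the `lines`/`max_lines` usage in the gpt_j demo patch at the top of this record:
```python
import gradio as gr

# Sketch of the proposed semantics: `lines` is the initial height,
# `max_lines` caps how far the box can auto-grow, and setting them
# equal effectively disables resizing.
with gr.Blocks() as demo:
    gr.Textbox(lines=2, max_lines=10, label="Auto-growing input")
    gr.Textbox(lines=4, max_lines=4, label="Fixed-height input")

demo.launch()
```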
> Actually, I wasn't suggesting using `lines` as `max_lines`. I was saying to drop the `autoheight` parameter, because that can be inferred from the value of `lines` and `max_lines`.
>
> * We should use `lines` as the number of lines that are shown by default (aka the minimum height), because that's the current behavior and we don't want to break that
>
> * We can add `max_lines` to mean the biggest that the `Textbox` can become. By default, this could be equal to `None`, which means that the textbox can become arbitrarily large. If the user does not want resizing, they can simply set this equal to `lines`
Moving this to 3.0 milestone since it is implemented in the backend #1009, feel free to move it to 2023 milestone. | 2022-05-03T14:41:12 |
gradio-app/gradio | 1,154 | gradio-app__gradio-1154 | [
"1123"
] | b0306e716da49895af7777d789c7ee0263658964 | diff --git a/demo/blocks_xray/run.py b/demo/blocks_xray/run.py
--- a/demo/blocks_xray/run.py
+++ b/demo/blocks_xray/run.py
@@ -27,7 +27,7 @@ def ct_model(diseases, img):
)
with gr.Tabs():
- with gr.TabItem("X-ray"):
+ with gr.TabItem("X-ray") as x_tab:
with gr.Row():
xray_scan = gr.Image()
xray_results = gr.JSON()
diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -202,7 +202,7 @@ def __init__(
def get_template_context(self):
return {"label": self.label, **super().get_template_context()}
- def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
+ def select(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
"""
Parameters:
fn: Callable function
@@ -210,7 +210,7 @@ def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]
outputs: List of outputs
Returns: None
"""
- self.set_event_trigger("change", fn, inputs, outputs)
+ self.set_event_trigger("select", fn, inputs, outputs)
class BlockFunction:
| implement `change` event for individual `TabItem`s
Implemented on the backend, not the front.
Note: maybe change the name to `show`, `load`, `select`.
@abidlabs any thoughts on the name?
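For reference, a minimal sketch of the event wiring as it looks with the patch above (which uses `select`); the handler here is a throwaway example:
```python
import gradio as gr

# Sketch: TabItem exposes a `select` event (renamed from `change` in the
# patch above) that fires when the tab is selected.
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("X-ray") as x_tab:
            status = gr.Textbox(label="Status")
    x_tab.select(fn=lambda: "X-ray tab selected", inputs=[], outputs=[status])

demo.launch()
```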
| No strong opinion. Maybe `select()` because the event happens when the tab item is selected | 2022-05-03T15:15:12 |
|
gradio-app/gradio | 1,159 | gradio-app__gradio-1159 | [
"1116"
] | 6a34a67b0a9107c46c62972570190aecb7d03632 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -238,7 +238,7 @@ def __init__(
theme: str = "default",
analytics_enabled: Optional[bool] = None,
mode: str = "blocks",
- enable_queue: bool = False,
+ enable_queue: bool = None,
**kwargs,
):
diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -126,7 +126,12 @@ def resize_and_crop(img, size, crop_type="center"):
def audio_from_file(filename, crop_min=0, crop_max=100):
- audio = AudioSegment.from_file(filename)
+ try:
+ audio = AudioSegment.from_file(filename)
+ except FileNotFoundError as e:
+ error_message = str(e)
+ if "ffprobe" in error_message:
+ print("Please install `ffmpeg` in your system to use non-WAV audio file formats.")
if crop_min != 0 or crop_max != 100:
audio_start = len(audio) * crop_min / 100
audio_end = len(audio) * crop_max / 100
diff --git a/gradio/tunneling.py b/gradio/tunneling.py
--- a/gradio/tunneling.py
+++ b/gradio/tunneling.py
@@ -10,7 +10,11 @@
import warnings
from io import StringIO
-import paramiko
+from cryptography.utils import CryptographyDeprecationWarning
+
+with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
+ import paramiko
def handler(chan, host, port):
| Blowfish is deprecated
### Describe the bug
When launching a block, I get warnings about `Blowfish` being deprecated
### Reproduction
```py
!pip install --quiet gradio==2.9.0b9
import gradio as gr
demo = gr.Blocks()
with demo:
with gr.Tabs():
with gr.TabItem("Text"):
gr.Interface(lambda x:x, "text", "text")
with gr.TabItem("Image"):
gr.Interface(lambda x:x, "image", "image")
demo.launch()
```
### Screenshot
_No response_
### Logs
```shell
/usr/local/lib/python3.7/dist-packages/paramiko/transport.py:236: CryptographyDeprecationWarning: Blowfish has been deprecated
"class": algorithms.Blowfish,
```
### System Info
```shell
Gradio 2.9.0b9, colab or spaces
```
### Severity
annoyance
| 2022-05-04T09:39:05 |
||
gradio-app/gradio | 1,164 | gradio-app__gradio-1164 | [
"1148"
] | 9eb6a6aebb3e0aaa4070b8cd12299c3d0aafca15 | diff --git a/demo/fake_gan/run.py b/demo/fake_gan/run.py
--- a/demo/fake_gan/run.py
+++ b/demo/fake_gan/run.py
@@ -24,6 +24,8 @@ def fake_gan(count, *args):
return images
+cheetah = os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg")
+
demo = gr.Interface(
fn=fake_gan,
inputs=[
@@ -38,21 +40,22 @@ def fake_gan(count, *args):
title="FD-GAN",
description="This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.",
examples=[
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
- [os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
+ [2, cheetah, 12, 12, 4, 4],
],
+ enable_queue=True,
)
if __name__ == "__main__":
| [blocks] show queue position, eta, and current duration with loading status
This finalises the loading status; it requires some refactoring.
| We should also make sure that the loading animation is appropriate for all of the components. I love the design, but we might need to tweak it for the smaller components | 2022-05-05T16:28:10 |
|
gradio-app/gradio | 1,185 | gradio-app__gradio-1185 | [
"1184"
] | b8fb0b47276eb295a324f14ae7f0e08203bc1242 | diff --git a/demo/blocks_layout/run.py b/demo/blocks_layout/run.py
new file mode 100644
--- /dev/null
+++ b/demo/blocks_layout/run.py
@@ -0,0 +1,31 @@
+import gradio as gr
+
+
+demo = gr.Blocks()
+
+with demo:
+ with gr.Row():
+ gr.Image(interactive=True)
+ gr.Image()
+ with gr.Row():
+ gr.Textbox(label="Text")
+ gr.Number(label="Count")
+ gr.Radio(choices=["One", "Two"])
+ with gr.Row():
+ with gr.Row():
+ with gr.Column():
+ gr.Textbox(label="Text")
+ gr.Number(label="Count")
+ gr.Radio(choices=["One", "Two"])
+ gr.Image()
+ with gr.Column():
+ gr.Image(interactive=True)
+ gr.Image()
+ gr.Image()
+ gr.Textbox(label="Text")
+ gr.Number(label="Count")
+ gr.Radio(choices=["One", "Two"])
+
+
+if __name__ == "__main__":
+ demo.launch()
| form groups broken when in a row
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Form groups are broken when in a row, rather than a column. This just hadn't been accounted for.
https://github.com/gradio-app/gradio/issues/690#issuecomment-1119351175
### Reproduction
-
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
-
```
### Severity
critical
| 2022-05-07T13:08:38 |
||
gradio-app/gradio | 1,226 | gradio-app__gradio-1226 | [
"1223"
] | 33ef3094513413cdcf278928bf49bea9c0d164f6 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -522,7 +522,7 @@ def launch(
self.enable_queue = enable_queue or False
self.config = self.get_config_file()
-
+ self.share = share
self.encrypt = encrypt
if self.encrypt:
self.encryption_key = encryptor.get_key(
@@ -593,8 +593,6 @@ def launch(
print(strings.en["PUBLIC_SHARE_TRUE"])
self.share_url = None
- self.share = share
-
if inbrowser:
link = self.share_url if share else self.local_url
webbrowser.open(link)
| AttributeError: 'Blocks' object has no attribute 'share'
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When share is not set, this seems to happen sometimes when fetching the `index.html`:
```
AttributeError: 'Blocks' object has no attribute 'share'
```
The app launches as expected, just with huge error logs.
Put it as critical because I don't know if this will cause problems in other contexts.
I think it was introduced in this PR: https://github.com/gradio-app/gradio/pull/1208
It is this code: https://github.com/gradio-app/gradio/blob/main/gradio/routes.py#L167-L178
It also seems to generate a link to the shared app but it isn't actually there.
### Reproduction
Run demo `xray_blocks`.
### Screenshot
_No response_
### Logs
Formatted: https://app.warp.dev/block/GeVcEzTPhl6o7w4rfc0ANA
```shell
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/opt/homebrew/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py", line 366, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/opt/homebrew/lib/python3.9/site-packages/uvicorn/middleware/proxy_headers.py", line 75, in __call__
return await self.app(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/fastapi/applications.py", line 261, in __call__
await super().__call__(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/applications.py", line 112, in __call__
await self.middleware_stack(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc
File "/opt/homebrew/lib/python3.9/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/middleware/cors.py", line 84, in __call__
await self.app(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc
File "/opt/homebrew/lib/python3.9/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/opt/homebrew/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 21, in __call__
raise e
File "/opt/homebrew/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__
await self.app(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/routing.py", line 656, in __call__
await route.handle(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/routing.py", line 259, in handle
await self.app(scope, receive, send)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/routing.py", line 61, in app
response = await func(request)
File "/opt/homebrew/lib/python3.9/site-packages/fastapi/routing.py", line 227, in app
raw_response = await run_endpoint_function(
File "/opt/homebrew/lib/python3.9/site-packages/fastapi/routing.py", line 162, in run_endpoint_function
return await run_in_threadpool(dependant.call, **values)
File "/opt/homebrew/lib/python3.9/site-packages/starlette/concurrency.py", line 39, in run_in_threadpool
return await anyio.to_thread.run_sync(func, *args)
File "/opt/homebrew/lib/python3.9/site-packages/anyio/to_thread.py", line 28, in run_sync
return await get_asynclib().run_sync_in_worker_thread(func, *args, cancellable=cancellable,
File "/opt/homebrew/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 818, in run_sync_in_worker_thread
return await future
File "/opt/homebrew/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 754, in run
result = context.run(func, *args)
File "/Users/pngwn/Projects/gradio/gradio/routes.py", line 169, in main
"frontend/share.html" if app.blocks.share else "frontend/index.html"
AttributeError: 'Blocks' object has no attribute 'share'
Running on public URL: https://15734.gradio.app
```
### System Info
```shell
`main`
```
### Severity
critical
| 2022-05-12T14:24:18 |
||
gradio-app/gradio | 1,235 | gradio-app__gradio-1235 | [
"1231"
] | 5ba2e06e6971e946f2bdae0090b847056592a365 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -171,6 +171,11 @@ def update(
"__type__": "update",
}
+ def style(self, equal_height: Optional[bool] = None):
+ if equal_height is not None:
+ self._style["equal_height"] = equal_height
+ return self
+
class Column(BlockContext):
def __init__(
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -467,7 +467,7 @@ def clean_html(raw_html):
)
if self.description:
Markdown(self.description)
- with Row():
+ with Row().style(equal_height=False):
if self.interface_type in [
self.InterfaceTypes.STANDARD,
self.InterfaceTypes.INPUT_ONLY,
| [digit_classifier] Image Source Canvas Style
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
The canvas shown when Image is used as an input source overflows its container (see the video in the Screenshot section).
### Reproduction
Test Notebook
https://colab.research.google.com/drive/1epV1ILJZMN6BrtizIpcD4sjqDclUSoty?usp=sharing
Demo from
https://github.com/gradio-app/gradio/blob/main/demo/digit_classifier/run.py
### Screenshot
https://user-images.githubusercontent.com/102277/168143672-e4fd7e36-caa4-4049-8b4e-352153024f45.mov
### Logs
_No response_
### System Info
```shell
3.0b0
```
### Severity
critical
| 2022-05-12T20:11:08 |
||
gradio-app/gradio | 1,276 | gradio-app__gradio-1276 | [
"1273"
] | b69e8cb1515ea3f59d113e73516245108c523fa2 | diff --git a/demo/blocks_essay/run.py b/demo/blocks_essay/run.py
--- a/demo/blocks_essay/run.py
+++ b/demo/blocks_essay/run.py
@@ -1,20 +1,22 @@
import gradio as gr
+
def change_textbox(choice):
if choice == "short":
- return gr.Radio.update(lines=2, visible=True)
+ return gr.Textbox.update(lines=2, visible=True)
elif choice == "long":
- return gr.Radio.update(lines=8, visible=True)
+ return gr.Textbox.update(lines=8, visible=True)
else:
- return gr.Radio.update(visible=False)
+ return gr.Textbox.update(visible=False)
with gr.Blocks() as demo:
- radio = gr.Radio(["short", "long", "none"],
- label="What kind of essay would you like to write?")
+ radio = gr.Radio(
+ ["short", "long", "none"], label="What kind of essay would you like to write?"
+ )
text = gr.Textbox(lines=2, interactive=True)
radio.change(fn=change_textbox, inputs=radio, outputs=text)
if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
+ demo.launch()
diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py
--- a/demo/blocks_outputs/run.py
+++ b/demo/blocks_outputs/run.py
@@ -35,7 +35,7 @@
headers=["One", "Two", "Three", "Four"],
col_count=(4, "fixed"),
row_count=(7, "fixed"),
- value=[[1, 2, 3, 4]],
+ value=[[0, 0, 0, 0]],
)
gr.Dataframe(
interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4
| 3.0b8 gr.inputs.Radio linkage problem
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
I am using `gr.inputs.Radio` in gradio v3.0b8, and found that there is a linkage problem between different radio inputs. I made a demo:
```python
# gradio 3.0b8
import gradio as gr
def greet(name):
return "Hello " + name + "!!"
inputs_device = gr.inputs.Radio(choices=["cuda:0", "cpu"], default="cuda:0", label="设备")
inputs_size = gr.inputs.Radio(choices=[320, 640, 1280], default=320, label="推理尺寸")
iface = gr.Interface(fn=greet, inputs=[inputs_device, inputs_size], outputs="text")
iface.launch()
```
### Reproduction


### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio 3.0b8
ubuntu 20.04
chrome
```
### Severity
annoyance
| 2022-05-16T14:03:22 |
||
gradio-app/gradio | 1,328 | gradio-app__gradio-1328 | [
"1314"
] | 5b827f5b2c8cb2bb756c71feec148efc8ee7e854 | diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py
--- a/demo/blocks_outputs/run.py
+++ b/demo/blocks_outputs/run.py
@@ -27,15 +27,18 @@
md = gr.Markdown(show_label=False)
label = gr.Label(show_label=False)
highlight = gr.HighlightedText(show_label=False)
- gr.Dataframe(interactive=True, col_count=(3, "fixed"))
- gr.Dataframe(interactive=True, col_count=4)
- gr.Dataframe(interactive=True, headers=["One", "Two", "Three", "Four"])
+ gr.Dataframe(interactive=True, col_count=(3, "fixed"), label="Dataframe")
+ gr.Dataframe(interactive=True, col_count=4, label="Dataframe")
+ gr.Dataframe(
+ interactive=True, headers=["One", "Two", "Three", "Four"], label="Dataframe"
+ )
gr.Dataframe(
interactive=True,
headers=["One", "Two", "Three", "Four"],
col_count=(4, "fixed"),
row_count=(7, "fixed"),
value=[[0, 0, 0, 0]],
+ label="Dataframe",
)
gr.Dataframe(
interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4
| DataFrame label not showing
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
When creating a dataframe with a label, the label does not show:
` gr.Dataframe(headers=["Mean","Min","Max","Std"], row_count=[1,'fixed'], col_count=[4,'fixed'], datatype="number", type="pandas", label = "testtt",show_label = True)`
<img width="242" alt="image" src="https://user-images.githubusercontent.com/105737531/168857762-0c43629f-8864-40c4-8d4f-1041f7ab4295.png">
### Reproduction
Create a DataFrame input with label
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
Gradio 3.0.1
Python 3.9.7
Windows
```
### Severity
critical
| Thanks for the catch @atben123!
@pngwn I think we can put a tiny dataframe icon and label above the dataframe similar to how we do it for images, video, and so on.
@abidlabs I don't think we can because it will cover the table. We don't wrap the table in the same container that most of the other components have. I'll see what I can come up with. | 2022-05-18T13:15:04 |
|
gradio-app/gradio | 1,433 | gradio-app__gradio-1433 | [
"1406"
] | c96075a7b73e8401c3fe4b56fbba821213261625 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -683,17 +683,21 @@ def launch(
"click the link to access the interface in a new tab."
)
try:
- from IPython.display import IFrame, display # type: ignore
+ from IPython.display import HTML, display # type: ignore
if share:
while not networking.url_ok(self.share_url):
time.sleep(1)
display(
- IFrame(self.share_url, width=self.width, height=self.height)
+ HTML(
+ f'<div><iframe src="{self.share_url}" width="{self.width}" height="{self.height}" allow="autoplay; camera; microphone;" frameborder="0" allowfullscreen></iframe></div>'
+ )
)
else:
display(
- IFrame(self.local_url, width=self.width, height=self.height)
+ HTML(
+ f'<div><iframe src="{self.local_url}" width="{self.width}" height="{self.height}" allow="autoplay; camera; microphone;" frameborder="0" allowfullscreen></iframe></div>'
+ )
)
except ImportError:
pass
| Set IFrame feature policy for Colab environment
### Describe the bug
Related to this discussion https://github.com/gradio-app/gradio/discussions/1389
There are two issues here,
1. The Gradio-generated `IFrame` doesn't have the right feature policies such as `allow="camera;microphone;"`
https://github.com/gradio-app/gradio/blob/2eaf61cf7652a3ebc70ea877c33b4a66e0e0e1ce/gradio/blocks.py#L686-L697
It would be easy to add the IFrame `extras=['allow="camera;microphone;"']` parameter; however, Colab is stuck on IPython 5.5.0, which doesn't support the `extras` parameter.
https://github.com/googlecolab/colabtools/issues/1582 (IPython 5.5.0)
<img width="595" alt="Screen Shot 2022-05-27 at 10 45 21" src="https://user-images.githubusercontent.com/102277/170768957-35a511e1-a2a2-4336-acca-97fa89e14d9a.png">
P.S. In the screenshot the `extras` parameter works because one can upgrade IPython (`pip install ipython -U`) on Colab.
### Solution
One Solution is to use `IPython.display.HTML` instead of `IFrame`
```python
display(
HTML(f'<iframe src="{self.share_url}" width="{self.width}" height="{self.height}" allow="camera;microphone;">')
)
```
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://colab.research.google.com/drive/1dqNIMngC1ZS7HmJvyDrsYdsBjH3P0Fpe
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==3.0.6
```
### Severity
annoying
| Thx for the detailed suggestion @radames, would you like to take this on and open a PR?
Hi @FarukOzderim, yes, I can do a PR if you think it's OK to replace the IFrame with HTML as I suggest. Should we add extra policies besides camera, microphone, and autoplay? Here is my suggestion:
```html
<iframe allow="autoplay; camera; microphone;" src=""> </iframe>
```
I think it is okay 👍, we already ask for permissions afaik, there should be no problems. | 2022-05-31T19:59:20 |
|
gradio-app/gradio | 1,457 | gradio-app__gradio-1457 | [
"1353"
] | d8eb9575348752ef8b89085c6cfda1fd811e9af6 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
setup(
name="gradio",
- version="3.0.11",
+ version="3.0.12",
include_package_data=True,
description="Python library for easily interacting with trained machine learning models",
long_description=long_description,
| Blocks API - Checkbox change does not trigger
### Describe the bug
Can't get the `Checkbox.change` event to trigger 😕 did I read the documentation wrong?
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
def update(checked):
print("Updating component")
return str(checked)
def main() -> gr.Blocks:
demo = gr.Blocks()
with demo:
checkbox = gr.Checkbox(False)
text_output = gr.Markdown("False")
checkbox.change(
update,
checkbox,
text_output,
)
return demo
if __name__ == "__main__":
demo = main()
demo.launch()
```
[Link to Demo Space](https://huggingface.co/spaces/Fanilo/gradio-sandbox)
### Screenshot

### Logs
```shell
No logs in Python/JS
```
### System Info
```shell
Gradio 3.0.3
Windows
Firefox/Chrome latest stable
```
### Severity
serious, but I can work around it
| Thanks @andfanilo, can reproduce this issue as well! | 2022-06-03T05:13:01 |
|
gradio-app/gradio | 1,460 | gradio-app__gradio-1460 | [
"1442"
] | 282748b4affdea18d9e08f6c8041084640e232d8 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -7,11 +7,11 @@
import random
import sys
import time
-import warnings
import webbrowser
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
-from fastapi.concurrency import run_in_threadpool
+import anyio
+from anyio import CapacityLimiter
from gradio import encryptor, external, networking, queueing, routes, strings, utils
from gradio.context import Context
@@ -218,6 +218,7 @@ def __init__(
mode (str): a human-friendly name for the kind of Blocks interface being created.
"""
# Cleanup shared parameters with Interface #TODO: is this part still necessary after Interface with Blocks?
+ self.limiter = None
self.save_to = None
self.api_mode = False
self.theme = theme
@@ -429,7 +430,9 @@ async def call_function(self, fn_index, processed_input):
if inspect.iscoroutinefunction(block_fn.fn):
prediction = await block_fn.fn(*processed_input)
else:
- prediction = await run_in_threadpool(block_fn.fn, *processed_input)
+ prediction = await anyio.to_thread.run_sync(
+ block_fn.fn, *processed_input, limiter=self.limiter
+ )
duration = time.time() - start
return prediction, duration
@@ -525,6 +528,11 @@ async def process_api(
"average_duration": block_fn.total_runtime / block_fn.total_runs,
}
+ async def create_limiter(self, max_threads: Optional[int]):
+ self.limiter = (
+ None if max_threads is None else CapacityLimiter(total_tokens=max_threads)
+ )
+
def get_config(self):
return {"type": "column"}
@@ -645,6 +653,7 @@ def launch(
share: bool = False,
debug: bool = False,
enable_queue: bool = None,
+ max_threads: Optional[int] = None,
auth: Optional[Callable | Tuple[str, str] | List[Tuple[str, str]]] = None,
auth_message: Optional[str] = None,
prevent_thread_lock: bool = False,
@@ -678,6 +687,7 @@ def launch(
server_name (str | None): to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME. If None, will use "127.0.0.1".
show_tips (bool): if True, will occasionally show tips about new Gradio features
enable_queue (bool | None): if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
+ max_threads (int | None): allow up to `max_threads` to be processed in parallel. The default is inherited from the starlette library (currently 40).
width (int): The width in pixels of the iframe element containing the interface (used if inline=True)
height (int): The height in pixels of the iframe element containing the interface (used if inline=True)
encrypt (bool): If True, flagged data will be encrypted by key provided by creator at launch
@@ -710,7 +720,7 @@ def launch(
self.enable_queue = True
else:
self.enable_queue = enable_queue or False
-
+ utils.synchronize_async(self.create_limiter, max_threads)
self.config = self.get_config_file()
self.share = share
self.encrypt = encrypt
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -2,8 +2,7 @@
from __future__ import annotations
-import copy
-import csv
+import asyncio
import inspect
import json
import json.decoder
@@ -311,3 +310,9 @@ def component_or_layout_class(cls_name: str) -> Component | BlockContext:
):
return cls
raise ValueError(f"No such component or layout: {cls_name}")
+
+
+def synchronize_async(func: Callable, *args: object, callback_func: Callable = None):
+ event_loop = asyncio.get_event_loop()
+ task = event_loop.create_task(func(*args))
+ task.add_done_callback(callback_func)
| Allow increase of max parallel requests
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
The maximum number of threads being processed in parallel is set to `40` (see below for the reason why). This creates a contention queue at the server, even if the payload function is capable of more parallelism, and despite the server `backlog` being set to 2048 by default. This is happening in our dalle-mini space, where requests are being sent to a backend server that runs behind a load balancer: under high-load situations requests wait at the gradio server even though our server could absorb them.
**Describe the solution you'd like**
- Create a mechanism to increase the limit.
- (Optionally) Create an option, possibly in `launch`, to set the desired limit.
**Additional context**
The limit happens when requesting predictions using `await run_in_threadpool(block_fn.fn, *processed_input)` in `blocks.py`. This goes to the Starlette server, which in turn uses `anyio` to abstract threading operations. Inspecting the configuration of the default `CapacityLimiter` shows that the limit is set to 40:
```
anyio.to_thread.current_default_thread_limiter().total_tokens
```
I can work on a pull request to address this improvement.
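For reference, a minimal sketch (editorial addition, not part of the original issue) of the approach the patch above takes: an optional `max_threads` value becomes a dedicated `CapacityLimiter` passed to anyio when dispatching the blocking prediction function, instead of relying on the 40-token default limiter.

```python
import anyio
from anyio import CapacityLimiter


async def call_blocking(fn, *args, max_threads=None):
    # With no explicit limit, anyio falls back to its default thread limiter
    # (40 tokens at the time of writing); otherwise a dedicated CapacityLimiter
    # caps how many blocking predictions run in worker threads at once.
    limiter = None if max_threads is None else CapacityLimiter(total_tokens=max_threads)
    return await anyio.to_thread.run_sync(fn, *args, limiter=limiter)
```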
| @pcuenca this is a very beautiful issue, thx a lot ❤️.
A PR would be great!
However, I have some hesitation about how much increasing the maximum number of threads from 40 to x would benefit performance, because Python runs threads on a single core; it could even worsen performance due to context switches. | 2022-06-03T08:58:23 |
|
gradio-app/gradio | 1,511 | gradio-app__gradio-1511 | [
"1510"
] | fdbecafffb7d35f7c6cc6c6c06fca47af22f480f | diff --git a/demo/blocks_form/run.py b/demo/blocks_form/run.py
--- a/demo/blocks_form/run.py
+++ b/demo/blocks_form/run.py
@@ -9,7 +9,7 @@
submit_btn = gr.Button("Submit")
diagnosis_box = gr.Textbox(label="Diagnosis")
- patient_summary_box = gr.Textbox(label="Patient Summary")
+ patient_summary_box = gr.Textbox(label="Patient Summary", visible=False)
def submit(name, age, symptoms):
if len(name) == 0:
@@ -18,7 +18,7 @@ def submit(name, age, symptoms):
return {error_box: gr.update(value="Enter valid age", visible=True)}
return {
diagnosis_box: "covid" if "Cough" in symptoms else "flu",
- patient_summary_box: f"{name}, {age} y/o",
+ patient_summary_box: gr.update(value=f"{name}, {age} y/o", visible=True)
}
submit_btn.click(
| The `visible` kwarg is not attached to many Components
Make the `visible` kwarg work for all components (see the sketch below).
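For reference, a minimal sketch (editorial, component and variable names are illustrative) of the pattern the demo patch above relies on: a component created with `visible=False` and revealed later via `gr.update`.

```python
import gradio as gr

with gr.Blocks() as demo:
    out = gr.Textbox(label="Summary", visible=False)  # hidden until updated
    btn = gr.Button("Show")
    # returning gr.update(...) toggles visibility at runtime
    btn.click(lambda: gr.update(value="hello", visible=True), None, out)

demo.launch()
```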
| 2022-06-09T20:57:32 |
||
gradio-app/gradio | 1,519 | gradio-app__gradio-1519 | [
"1386"
] | f41ebdd616ead34e11da617eff144126c08ef33c | diff --git a/demo/blocks_multiple_event_triggers/run.py b/demo/blocks_multiple_event_triggers/run.py
new file mode 100644
--- /dev/null
+++ b/demo/blocks_multiple_event_triggers/run.py
@@ -0,0 +1,34 @@
+import gradio as gr
+import pypistats
+from datetime import date
+from dateutil.relativedelta import relativedelta
+import pandas as pd
+
+pd.options.plotting.backend = "plotly"
+
+
+def get_plot(lib, time):
+ data = pypistats.overall(lib, total=True, format="pandas")
+ data = data.groupby("category").get_group("with_mirrors").sort_values("date")
+ start_date = date.today() - relativedelta(months=int(time.split(" ")[0]))
+ data = data[(data['date'] > str(start_date))]
+ chart = data.plot(x="date", y="downloads")
+ return chart
+
+
+with gr.Blocks() as demo:
+ gr.Markdown(
+ """
+ ## Pypi Download Stats 📈
+ See live download stats for all of Hugging Face's open-source libraries 🤗
+ """)
+ with gr.Row():
+ lib = gr.Dropdown(["transformers", "datasets", "huggingface-hub", "gradio", "accelerate"], label="Library")
+ time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"], label="Downloads over the last...")
+
+ plt = gr.Plot()
+ # You can add multiple event triggers in 2 lines like this
+ for event in [lib.change, time.change, demo.load]:
+ event(get_plot, [lib, time], [plt])
+
+demo.launch()
| Couple event triggers together
- [X] I have searched to see if a similar issue already exists.
We might want to enable coupling event triggers together for ease of readability within the code.
[Example](https://huggingface.co/spaces/huggingface/library-metrics/blob/main/app.py) from @abidlabs:
```
lib.change(get_plot, [lib, time], plt)
time.change(get_plot, [lib, time], plt)
demo.load(get_plot, [lib, time], plt)
```
My Suggestion:
```
create_events( [lib.change, time.change, demo.load], [lib,time], [plt] )
```
| Upon further reflection, I think we should avoid creating another method and just let people use plain, readable Python for this:
```py
for event in [lib.change, time.change, demo.load]:
event(get_plot, [lib, time], plt)
```
Agreed, but let's have an example demo for that. | 2022-06-10T06:20:25 |
|
gradio-app/gradio | 1,649 | gradio-app__gradio-1649 | [
"1647"
] | c323aac2747307c3ce3e3b6de55cfe5b166cf5d2 | diff --git a/demo/blocks_essay_update/run.py b/demo/blocks_essay_update/run.py
new file mode 100644
--- /dev/null
+++ b/demo/blocks_essay_update/run.py
@@ -0,0 +1,23 @@
+import gradio as gr
+
+
+def change_textbox(choice):
+ if choice == "short":
+ return gr.update(lines=2, visible=True)
+ elif choice == "long":
+ return gr.update(lines=8, visible=True)
+ else:
+ return gr.update(visible=False)
+
+
+with gr.Blocks() as demo:
+ radio = gr.Radio(
+ ["short", "long", "none"], label="What kind of essay would you like to write?"
+ )
+ text = gr.Textbox(lines=2, interactive=True)
+
+ radio.change(fn=change_textbox, inputs=radio, outputs=text)
+
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -192,11 +192,24 @@ def __get__(self, instance, type_):
return descr_get(instance, type_)
+@document()
def update(**kwargs) -> dict:
"""
- Updates component parameters
- @param kwargs: Updating component parameters
- @return: Updated component parameters
+ Updates component parameters.
+ This is a shorthand for using the update method on a component.
+ For example, rather than using gr.Number.update(...) you can just use gr.update(...).
+
+ Demos: blocks_update, blocks_essay_update
+
+ Parameters:
+ kwargs: Key-word arguments used to update the component's properties.
+ Example:
+ import gradio as gr
+ with gr.Blocks() as demo:
+ radio = gr.Radio([1, 2, 4], label="Set the value of the number")
+ number = gr.Number(value=2, interactive=True)
+ radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)
+ demo.launch()
"""
kwargs["__type__"] = "generic_update"
return kwargs
diff --git a/gradio/documentation.py b/gradio/documentation.py
--- a/gradio/documentation.py
+++ b/gradio/documentation.py
@@ -57,7 +57,9 @@ def document_fn(fn):
description_doc = " ".join(description)
parameter_docs = []
for param_name, param in signature.parameters.items():
- if param_name.startswith("_") or param_name == "kwargs":
+ if param_name.startswith("_"):
+ continue
+ if param_name == "kwargs" and param_name not in parameters:
continue
parameter_doc = {
"name": param_name,
@@ -128,7 +130,8 @@ def generate_documentation():
for mode, class_list in classes_to_document.items():
documentation[mode] = []
for cls, fns in class_list:
- _, parameter_doc, return_doc, _ = document_fn(cls.__init__)
+ fn_to_document = cls if inspect.isfunction(cls) else cls.__init__
+ _, parameter_doc, return_doc, _ = document_fn(fn_to_document)
cls_description, cls_tags, cls_example = document_cls(cls)
cls_documentation = {
"class": cls,
| Add gr.update to docs
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I wish the `gr.update` function was documented on the website. It would be most helpful if the documentation explained the difference between the standalone function and the `update` class method.
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Additional context**
Add any other context or screenshots about the feature request here.
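For reference, a minimal sketch (editorial addition) of the two equivalent spellings that the docstring added in this patch documents: the generic `gr.update(...)` shorthand versus the component-specific classmethod.

```python
import gradio as gr

with gr.Blocks() as demo:
    radio = gr.Radio([1, 2, 4], label="Set the value of the number")
    number = gr.Number(value=2, interactive=True)
    # generic shorthand documented by this patch...
    radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)
    # ...equivalent to the component-specific classmethod:
    # radio.change(fn=lambda value: gr.Number.update(value=value), inputs=radio, outputs=number)

demo.launch()
```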
| 2022-06-27T21:27:07 |
||
gradio-app/gradio | 1,655 | gradio-app__gradio-1655 | [
"1124"
] | 4deab89404370347e221d6c5aa1b2f0c3ba59060 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -3913,6 +3913,9 @@ def update(
def style(self):
return self
+ def postprocess(self, y):
+ return y
+
class StatusTracker(Component):
"""
| diff --git a/test/test_external.py b/test/test_external.py
--- a/test/test_external.py
+++ b/test/test_external.py
@@ -173,7 +173,7 @@ def test_english_to_spanish(self):
def test_sentiment_model(self):
io = gr.Interface.load("models/distilbert-base-uncased-finetuned-sst-2-english")
try:
- output = io("I am happy, I love you.")
+ output = io("I am happy, I love you")
self.assertGreater(output["POSITIVE"], 0.5)
except TooManyRequestsError:
pass
diff --git a/test/test_interfaces.py b/test/test_interfaces.py
--- a/test/test_interfaces.py
+++ b/test/test_interfaces.py
@@ -7,6 +7,7 @@
import mlflow
import requests
import wandb
+from fastapi.testclient import TestClient
from gradio.blocks import Blocks
from gradio.interface import Interface, TabbedInterface, close_all, os
@@ -219,5 +220,39 @@ def test_deprecation_notice(self):
_ = Interface(lambda x: x, "textbox", "textbox", verbose=True)
+class TestInterfaceInterpretation(unittest.TestCase):
+ def test_interpretation_from_interface(self):
+ def quadratic(num1: float, num2: float) -> float:
+ return 3 * num1**2 + num2
+
+ iface = Interface(
+ fn=quadratic,
+ inputs=["number", "number"],
+ outputs="number",
+ interpretation="default",
+ )
+
+ app, _, _ = iface.launch(prevent_thread_lock=True)
+ client = TestClient(app)
+
+ btn = next(
+ c["id"]
+ for c in iface.config["components"]
+ if c["props"].get("value") == "Interpret"
+ )
+ fn_index = next(
+ i
+ for i, d in enumerate(iface.config["dependencies"])
+ if d["targets"] == [btn]
+ )
+
+ response = client.post(
+ "/api/predict/", json={"fn_index": fn_index, "data": [10, 50, 350]}
+ )
+ self.assertTrue(response.json()["data"][0]["interpretation"] is not None)
+ iface.close()
+ close_all()
+
+
if __name__ == "__main__":
unittest.main()
| Interpretations don't work on latest main branch
### Describe the bug
I can't run the default interpretation for a simple app with a single numeric input and label output on the latest commit to main.
### Reproduction
```python
import gradio as gr
def is_greater_than_50(num: int) -> bool:
return num > 50
iface = gr.Interface(fn=is_greater_than_50, inputs="number",
outputs="label", interpretation="default")
iface.launch()
```
After hitting `Interpret`, server will throw the following exception and not render an interpretation
```
AttributeError: 'Interpretation' object has no attribute 'postprocess'
```
### Screenshot

### Logs
```shell
(gradio-dev) freddy@DESKTOP-V50OCDE:~/sources/gradio-scripts$ python hello_world.py
Running on local URL: http://127.0.0.1:7861/
To create a public link, set `share=True` in `launch()`.
Traceback (most recent call last):
File "/home/freddy/sources/gradio/gradio/routes.py", line 247, in predict
output = await run_in_threadpool(
File "/home/freddy/miniconda3/envs/gradio-dev/lib/python3.9/site-packages/starlette/concurrency.py", line 39, in run_in_threadpool
return await anyio.to_thread.run_sync(func, *args)
File "/home/freddy/miniconda3/envs/gradio-dev/lib/python3.9/site-packages/anyio/to_thread.py", line 28, in run_sync
return await get_asynclib().run_sync_in_worker_thread(func, *args, cancellable=cancellable,
File "/home/freddy/miniconda3/envs/gradio-dev/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 818, in run_sync_in_worker_thread
return await future
File "/home/freddy/miniconda3/envs/gradio-dev/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 754, in run
result = context.run(func, *args)
File "/home/freddy/sources/gradio/gradio/blocks.py", line 315, in process_api
block.postprocess(predictions[i])
AttributeError: 'Interpretation' object has no attribute 'postprocess'
```
```
### System Info
```shell
On commit `6d0a6ccb83608e255e1b0ee83b2aa27d96b4c774`, on WSL
```
### Severity
critical
| Hi, I am facing the same problem. The interpret button does not work for this demo https://huggingface.co/course/chapter9/6?fw=pt.
Is there any update on this? Thanks. | 2022-06-28T18:11:44 |
gradio-app/gradio | 1,667 | gradio-app__gradio-1667 | [
"1663"
] | 8c9a9a9696c54d109a8a2ca808b34221f56b0a90 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2119,7 +2119,7 @@ def postprocess(self, y):
return processing_utils.encode_url_or_file_to_base64(y)
def deserialize(self, x):
- file = processing_utils.decode_base64_to_file(x["data"])
+ file = processing_utils.decode_base64_to_file(x)
return file.name
def stream(
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -843,7 +843,9 @@ def test_component_functions(self):
},
)
self.assertTrue(
- audio_output.deserialize(deepcopy(media_data.BASE64_AUDIO)).endswith(".wav")
+ audio_output.deserialize(
+ deepcopy(media_data.BASE64_AUDIO)["data"]
+ ).endswith(".wav")
)
with tempfile.TemporaryDirectory() as tmpdirname:
to_save = audio_output.save_flagged(
| TypeError in TTS demo
### Describe the bug
Looking to demo a text-to-speech model using the [fastspeech2 checkpoint](https://huggingface.co/facebook/fastspeech2-en-ljspeech) from the HF hub as follows:
```python
import gradio as gr
gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech").launch();
```
This yields the following demo: https://45412.gradio.app/
However, when I try and run the demo with any sort of input I just get `ERROR`. The demo should work as it does on the fastspeech2 model card: https://huggingface.co/facebook/fastspeech2-en-ljspeech
cc @AK391
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech").launch(debug=True);
```
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/gradio/routes.py", line 256, in run_predict
fn_index, raw_input, username, session_state
File "/usr/local/lib/python3.7/dist-packages/gradio/blocks.py", line 546, in process_api
predictions, duration = await self.call_function(fn_index, processed_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/blocks.py", line 462, in call_function
block_fn.fn, *processed_input, limiter=self.limiter
File "/usr/local/lib/python3.7/dist-packages/anyio/to_thread.py", line 32, in run_sync
func, *args, cancellable=cancellable, limiter=limiter
File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 509, in <lambda>
if len(self.output_components) == 1
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 725, in run_prediction
pred
File "/usr/local/lib/python3.7/dist-packages/gradio/components.py", line 2103, in deserialize
file = processing_utils.decode_base64_to_file(x["data"])
TypeError: string indices must be integers
```
### System Info
```shell
Gradio version: 3.0.20
System: G-colab (fresh install)
```
### Severity
blocking all usage of gradio
| 2022-06-29T17:02:27 |
|
gradio-app/gradio | 1,669 | gradio-app__gradio-1669 | [
"1648"
] | 6bd4e17e69e3cf68a3f1de74a7a9432e1327a839 | diff --git a/gradio/__init__.py b/gradio/__init__.py
--- a/gradio/__init__.py
+++ b/gradio/__init__.py
@@ -1,4 +1,4 @@
-import pkg_resources
+from importlib.metadata import version
import gradio.components as components
import gradio.inputs as inputs
@@ -67,5 +67,5 @@
Webcam,
)
-current_pkg_version = pkg_resources.require("gradio")[0].version
+current_pkg_version = version("gradio")
__version__ = current_pkg_version
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,9 @@
version = (this_directory / "gradio" / "version.txt").read_text(
encoding='utf8').strip()
+with open("requirements.txt") as reqs:
+ requirements = reqs.readlines()
+
setup(
name="gradio",
version=version,
@@ -28,28 +31,7 @@
packages=["gradio"],
license="Apache License 2.0",
keywords=["machine learning", "visualization", "reproducibility"],
- install_requires=[
- "analytics-python",
- "aiohttp",
- "fastapi",
- "ffmpy",
- "markdown-it-py[linkify,plugins]",
- "matplotlib",
- "numpy",
- "orjson",
- "pandas",
- "paramiko",
- "pillow",
- "pycryptodome",
- "python-multipart",
- "pydub",
- "requests",
- "uvicorn",
- "Jinja2",
- "fsspec",
- "httpx",
- "pydantic",
- ],
+ install_requires=requirements,
entry_points={
'console_scripts': ['gradio=gradio.reload:run_in_reload_mode']
},
| diff --git a/scripts/install_test_requirements.sh b/scripts/install_test_requirements.sh
--- a/scripts/install_test_requirements.sh
+++ b/scripts/install_test_requirements.sh
@@ -5,7 +5,7 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
else
echo "Installing requirements for tests"
pip install --upgrade pip
- pip install -r gradio.egg-info/requires.txt
+ pip install -r requirements.txt
pip install -r test/requirements.txt
fi
| List python runtime dependencies in requirements.txt as opposed to setup.py
- [ ] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio developer, I think it's frustrating that the `gradio.egg-info` folder is tracked in version control. Whenever I build gradio locally, there are changes in the `gradio.egg-info` directory that are not related to my PR.
I see that `gradio.egg-info` is only included in the repo so that we can
1. cache the env in circleci
2. install all dependencies in the `install_test_requirements.sh` script
**Describe the solution you'd like**
I'd like to specify all dependencies in a `requirements.txt` file. That will allow us to still cache the circle-ci env. I don't think we need to install the gradio dependencies again in `install_test_requirements.sh` since that's done in `install_gradio.sh` but we can still install from `requirements.txt` if necessary.
This will also allow us to remove `gradio.egg-info` from version control.
**Additional context**
Add any other context or screenshots about the feature request here.
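For reference, a minimal sketch (editorial) of the setup.py pattern the patch above adopts, with runtime dependencies read from requirements.txt at build time:

```python
from setuptools import setup

# requirements.txt is now the single source of truth for runtime dependencies
with open("requirements.txt") as reqs:
    requirements = reqs.readlines()

setup(
    name="gradio",
    install_requires=requirements,
)
```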
| 2022-06-29T19:48:11 |
|
gradio-app/gradio | 1,684 | gradio-app__gradio-1684 | [
"1364"
] | eb42fc3cf874e2252536623462eab02d3d27f07f | diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py
--- a/demo/blocks_outputs/run.py
+++ b/demo/blocks_outputs/run.py
@@ -1,5 +1,26 @@
import gradio as gr
+
+def make_markdown():
+ return [
+ [
+ "# hello again",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
+ ],
+ [
+ "## hello again again",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
+ ],
+ [
+ "### hello thrice",
+ "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
+ ],
+ ]
+
+
with gr.Blocks() as demo:
with gr.Column():
txt = gr.Textbox(label="Small Textbox", lines=1, show_label=False)
@@ -43,27 +64,31 @@
gr.Dataframe(
interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4
)
- gr.DataFrame(
+ df = gr.DataFrame(
[
[
+ "# hello",
"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
],
[
+ "## hello",
"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
],
[
+ "### hello",
"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
- "Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.",
+ '<img src="https://images.unsplash.com/photo-1574613362884-f79513a5128c?fit=crop&w=500&q=80"/>',
],
],
headers=["One", "Two", "Three"],
wrap=True,
+ datatype=["markdown", "markdown", "html"],
+ interactive=True,
)
+ btn = gr.Button("Run")
+ btn.click(fn=make_markdown, inputs=None, outputs=df)
if __name__ == "__main__":
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2356,6 +2356,8 @@ class Dataframe(Changeable, IOComponent):
Demos: filter_records, matrix_transpose, tax_calculator
"""
+ markdown_parser = None
+
def __init__(
self,
value: Optional[List[List[Any]]] = None,
@@ -2405,13 +2407,17 @@ def __init__(
self.__validate_headers(headers, self.col_count[0])
self.headers = headers
- self.datatype = datatype
+ self.datatype = (
+ datatype if isinstance(datatype, list) else [datatype] * self.col_count[0]
+ )
self.type = type
values = {
"str": "",
"number": 0,
"bool": False,
"date": "01/01/1970",
+ "markdown": "",
+ "html": "",
}
column_dtypes = (
[datatype] * self.col_count[0] if isinstance(datatype, str) else datatype
@@ -2419,7 +2425,10 @@ def __init__(
self.test_input = [
[values[c] for c in column_dtypes] for _ in range(self.row_count[0])
]
+
self.value = value if value is not None else self.test_input
+ self.value = self.__process_markdown(self.value, datatype)
+
self.max_rows = max_rows
self.max_cols = max_cols
self.overflow_row_behaviour = overflow_row_behaviour
@@ -2520,16 +2529,24 @@ def postprocess(self, y):
if y is None:
return y
if isinstance(y, str):
- y = pd.read_csv(str)
- return {"headers": list(y.columns), "data": y.values.tolist()}
+ y = pd.read_csv(y)
+ return {
+ "headers": list(y.columns),
+ "data": Dataframe.__process_markdown(y.values.tolist(), self.datatype),
+ }
if isinstance(y, pd.DataFrame):
- return {"headers": list(y.columns), "data": y.values.tolist()}
+ return {
+ "headers": list(y.columns),
+ "data": Dataframe.__process_markdown(y.values.tolist(), self.datatype),
+ }
if isinstance(y, (np.ndarray, list)):
if isinstance(y, np.ndarray):
y = y.tolist()
if len(y) == 0 or not isinstance(y[0], list):
y = [y]
- return {"data": y}
+ return {
+ "data": Dataframe.__process_markdown(y, self.datatype),
+ }
raise ValueError("Cannot process value as a Dataframe")
@staticmethod
@@ -2550,10 +2567,24 @@ def __validate_headers(headers: List[str] | None, col_count: int):
)
)
+ @classmethod
+ def __process_markdown(cls, data: List[List[Any]], datatype: List[str]):
+ if "markdown" not in datatype:
+ return data
+
+ if cls.markdown_parser is None:
+ cls.markdown_parser = MarkdownIt()
+
+ for i in range(len(data)):
+ for j in range(len(data[i])):
+ if datatype[j] == "markdown":
+ data[i][j] = Dataframe.markdown_parser.render(data[i][j])
+
+ return data
+
def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
- border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
return IOComponent.style(
self,
@@ -2695,7 +2726,6 @@ def postprocess(self, y):
def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
- border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
return IOComponent.style(
self,
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1022,7 +1022,7 @@ def test_component_functions(self):
dataframe_input.get_config(),
{
"headers": ["Name", "Age", "Member"],
- "datatype": "str",
+ "datatype": ["str", "str", "str"],
"row_count": (3, "dynamic"),
"col_count": (3, "dynamic"),
"value": [
@@ -1079,7 +1079,7 @@ def test_component_functions(self):
"style": {},
"elem_id": None,
"visible": True,
- "datatype": "str",
+ "datatype": ["str", "str", "str"],
"row_count": (3, "dynamic"),
"col_count": (3, "dynamic"),
"value": [
| Dataframe allow Markdown or HTML content
- [X] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
Simple example here: https://huggingface.co/spaces/Gradio-Blocks/Leaderboard. Currently it's not possible to create hyperlinks inside a Dataframe table. I can see other use cases beyond `<a>` elements, maybe images or simple video previews. It would be handy to be able to preview rich-content Dataframe outputs. I understand the performance issues with large tables, but I guess that needs lazy loading anyway.
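For reference, a minimal sketch (editorial; the cell values and URL are illustrative) of the per-column `datatype` support added by the patch above, which is what enables Markdown and raw HTML cells such as hyperlinks:

```python
import gradio as gr

df = gr.Dataframe(
    value=[["# hello", '<a href="https://gradio.app">a link</a>']],
    headers=["Markdown", "HTML"],
    datatype=["markdown", "html"],  # per-column rendering added in this patch
)
```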
| +1!! This would be hugely useful
cc @pngwn for his thoughts | 2022-07-01T13:48:28 |
gradio-app/gradio | 1,685 | gradio-app__gradio-1685 | [
"1569"
] | eb42fc3cf874e2252536623462eab02d3d27f07f | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1233,9 +1233,7 @@ def postprocess(self, y):
Returns:
(str): string of choice
"""
- return (
- y if y is not None else self.choices[0] if len(self.choices) > 0 else None
- )
+ return y
def deserialize(self, x):
"""
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -537,7 +537,7 @@ def test_component_functions(self):
radio_input.get_config(),
{
"choices": ["a", "b", "c"],
- "value": "a",
+ "value": None,
"name": "radio",
"show_label": True,
"label": "Pick Your One Input",
| Dropdown is disabled when created with blocks
### Describe the bug
Hi,
I tried creating a Gradio app with Blocks, and the Dropdown was disabled after launch.
### Is there an existing issue for this?
- [x] I have searched the existing issues
### Reproduction
```
import gradio as gr
demo = gr.Blocks()
with demo:
with gr.Tabs():
with gr.TabItem("Record Video"):
with gr.Row():
inp1=gr.inputs.Video(source="webcam",optional=False,label='Capture Video')
inp2=gr.inputs.Video(source="webcam",optional=False,label='Capture Video')
with gr.Row():
subject_id = gr.inputs.Dropdown(["cat", "dog", "bird"])
demo.launch()
```
### Screenshot
<img width="1283" alt="image" src="https://user-images.githubusercontent.com/102731859/173801150-712bc70e-479a-456e-a12b-7e0bb293bdf5.png">
<img width="915" alt="image" src="https://user-images.githubusercontent.com/102731859/173801336-8f571c6d-026e-457e-8269-192f5d4ecc4d.png">
### Logs
```shell
NA
```
### System Info
```shell
Browser: Chrome
Gradio Version: 3.0.17
```
### Severity
blocking all usage of gradio
| Thanks for raising this issue. Once you make an event listener that takes the dropdown as an input, the dropdown will be activated. But I thought we removed disabled for elements by default, @pngwn?
Will fix. For now, you can either use `gr.Dropdown(choices=[...], interactive=True)`, or just create an event listener that takes the dropdown as input.
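For reference, a minimal sketch (editorial) of the interim workaround described above, reusing the choices from the reproduction:

```python
import gradio as gr

with gr.Blocks() as demo:
    # explicitly interactive until components default to interactive again
    subject_id = gr.Dropdown(["cat", "dog", "bird"], interactive=True)

demo.launch()
```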
@aliabid94 thanks, suggestion worked!
re-opening for now just to close this when components are interactive by default.
Btw @aliabid94 it looks like some of the other form components are also not interactive by default: e.g. `Radio`, `Slider`, `Checkbox`, and possibly others
This feels like a regression, I'm pretty sure we had this working fine a short while ago. Will investigate. | 2022-07-01T14:10:00 |
gradio-app/gradio | 1,686 | gradio-app__gradio-1686 | [
"1220"
] | 745e69d75c34c25db3b85483f9f9d3efa7c3a9a0 | diff --git a/demo/webcam/run.py b/demo/webcam/run.py
--- a/demo/webcam/run.py
+++ b/demo/webcam/run.py
@@ -3,11 +3,15 @@
import gradio as gr
-def snap(image):
- return np.flipud(image)
+def snap(image, video):
+ return [image, video]
-demo = gr.Interface(snap, gr.Image(source="webcam", tool=None), "image")
+demo = gr.Interface(
+ snap,
+ [gr.Image(source="webcam", tool=None), gr.Video(source="webcam")],
+ ["image", "video"],
+)
if __name__ == "__main__":
demo.launch()
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -10,6 +10,7 @@
import numbers
import operator
import os
+import pathlib
import shutil
import tempfile
import warnings
@@ -1337,6 +1338,7 @@ def __init__(
visible: bool = True,
streaming: bool = False,
elem_id: Optional[str] = None,
+ mirror_webcam: bool = True,
**kwargs,
):
"""
@@ -1354,7 +1356,9 @@ def __init__(
visible (bool): If False, component will be hidden.
streaming (bool): If True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'webcam'.
elem_id (Optional[str]): An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ mirror_webcam (bool): If True webcam will be mirrored. Default is True.
"""
+ self.mirror_webcam = mirror_webcam
self.type = type
self.value = self.postprocess(value)
self.shape = shape
@@ -1388,6 +1392,7 @@ def get_config(self):
"tool": self.tool,
"value": self.value,
"streaming": self.streaming,
+ "mirror_webcam": self.mirror_webcam,
**IOComponent.get_config(self),
}
@@ -1461,6 +1466,8 @@ def preprocess(self, x: Optional[str]) -> np.array | PIL.Image | str | None:
im = processing_utils.resize_and_crop(im, self.shape)
if self.invert_colors:
im = PIL.ImageOps.invert(im)
+ if self.source == "webcam" and self.mirror_webcam is True:
+ im = PIL.ImageOps.mirror(im)
if not (self.tool == "sketch"):
return self.format_image(im, fmt)
@@ -1693,6 +1700,7 @@ def __init__(
interactive: Optional[bool] = None,
visible: bool = True,
elem_id: Optional[str] = None,
+ mirror_webcam: bool = True,
**kwargs,
):
"""
@@ -1705,9 +1713,11 @@ def __init__(
interactive (Optional[bool]): if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.
visible (bool): If False, component will be hidden.
elem_id (Optional[str]): An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ mirror_webcam (bool): If True webcma will be mirrored. Default is True.
"""
self.format = format
self.source = source
+ self.mirror_webcam = mirror_webcam
self.value = self.postprocess(value)
IOComponent.__init__(
self,
@@ -1723,6 +1733,7 @@ def get_config(self):
return {
"source": self.source,
"value": self.value,
+ "mirror_webcam": self.mirror_webcam,
**IOComponent.get_config(self),
}
@@ -1771,11 +1782,21 @@ def preprocess(self, x: Dict[str, str] | None) -> str | None:
)
file_name = file.name
uploaded_format = file_name.split(".")[-1].lower()
+
if self.format is not None and uploaded_format != self.format:
output_file_name = file_name[0 : file_name.rindex(".") + 1] + self.format
ff = FFmpeg(inputs={file_name: None}, outputs={output_file_name: None})
ff.run()
return output_file_name
+ elif self.source == "webcam" and self.mirror_webcam is True:
+ path = pathlib.Path(file_name)
+ output_file_name = str(path.with_stem(f"{path.stem}_flip"))
+ ff = FFmpeg(
+ inputs={file_name: None},
+ outputs={output_file_name: ["-vf", "hflip", "-c:a", "copy"]},
+ )
+ ff.run()
+ return output_file_name
else:
return file_name
| diff --git a/gradio/test_data/blocks_configs.py b/gradio/test_data/blocks_configs.py
--- a/gradio/test_data/blocks_configs.py
+++ b/gradio/test_data/blocks_configs.py
@@ -46,6 +46,7 @@
"source": "upload",
"tool": "editor",
"streaming": False,
+ "mirror_webcam": True,
"show_label": True,
"name": "image",
"visible": True,
@@ -86,6 +87,7 @@
"source": "upload",
"tool": "editor",
"streaming": False,
+ "mirror_webcam": True,
"show_label": True,
"name": "image",
"visible": True,
@@ -232,6 +234,7 @@
"source": "upload",
"tool": "editor",
"streaming": False,
+ "mirror_webcam": True,
"show_label": True,
"name": "image",
"visible": True,
@@ -277,6 +280,7 @@
"source": "upload",
"tool": "editor",
"streaming": False,
+ "mirror_webcam": True,
"show_label": True,
"name": "image",
"visible": True,
@@ -433,6 +437,7 @@
"image_mode": "RGB",
"source": "upload",
"streaming": False,
+ "mirror_webcam": True,
"tool": "editor",
"name": "image",
"style": {},
@@ -480,6 +485,7 @@
"source": "upload",
"tool": "editor",
"streaming": False,
+ "mirror_webcam": True,
"name": "image",
"style": {},
},
diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -610,6 +610,7 @@ def test_component_functions(self):
"visible": True,
"value": None,
"interactive": None,
+ "mirror_webcam": True,
},
)
self.assertIsNone(image_input.preprocess(None))
@@ -1174,6 +1175,7 @@ def test_component_functions(self):
"visible": True,
"value": None,
"interactive": None,
+ "mirror_webcam": True,
},
)
self.assertIsNone(video_input.preprocess(None))
| Mirroring for webcam inputs
- Mirror webcam's by default with necessary preprocessing.
- kwarg to disable mirror
| 2022-07-01T15:46:46 |
|
gradio-app/gradio | 1,709 | gradio-app__gradio-1709 | [
"1652"
] | 745e69d75c34c25db3b85483f9f9d3efa7c3a9a0 | diff --git a/demo/blocks_plug/run.py b/demo/blocks_plug/run.py
--- a/demo/blocks_plug/run.py
+++ b/demo/blocks_plug/run.py
@@ -1,9 +1,14 @@
import gradio as gr
+
+def change_tab():
+ return gr.Tabs.update(selected=2)
+
+
identity_demo, input_demo, output_demo = gr.Blocks(), gr.Blocks(), gr.Blocks()
with identity_demo:
- gr.Interface(lambda x:x, "text", "text")
+ gr.Interface(lambda x: x, "text", "text")
with input_demo:
t = gr.Textbox(label="Enter your text here")
@@ -17,14 +22,15 @@
with gr.Blocks() as demo:
gr.Markdown("Three demos in one!")
- with gr.Tabs():
- with gr.TabItem("Text Identity"):
+ with gr.Tabs(selected=1) as tabs:
+ with gr.TabItem("Text Identity", id=0):
identity_demo.render()
- with gr.TabItem("Text Input"):
+ with gr.TabItem("Text Input", id=1):
input_demo.render()
- with gr.TabItem("Text Static"):
+ with gr.TabItem("Text Static", id=2):
output_demo.render()
-
+ btn = gr.Button("Change tab")
+ btn.click(inputs=None, outputs=tabs, fn=change_tab)
if __name__ == "__main__":
demo.launch()
diff --git a/gradio/layouts.py b/gradio/layouts.py
--- a/gradio/layouts.py
+++ b/gradio/layouts.py
@@ -96,6 +96,26 @@ class Tabs(BlockContext):
Tabs context.
"""
+ def __init__(self, selected: Optional[int | str] = None, **kwargs):
+ """
+ Parameters:
+ label (str): The visual label for the tab
+ selected: (Optional[int | str]): The currently selected tab. Must correspdong to an id passed to the one of the child TabItems. Defaults to the first TabItem.
+ """
+ super().__init__(**kwargs)
+ self.selected = selected
+
+ def get_config(self):
+ return {"selected": self.selected, **super().get_config()}
+
+ def update(
+ selected: Optional[int | str] = None,
+ ):
+ return {
+ "selected": selected,
+ "__type__": "update",
+ }
+
def change(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
"""
Parameters:
@@ -113,12 +133,18 @@ class TabItem(BlockContext):
components defined within the TabItem will be rendered within a tab.
"""
- def __init__(self, label, **kwargs):
+ def __init__(self, label: str, id: Optional[int | str] = None, **kwargs):
+ """
+ Parameters:
+ label (str): The visual label for the tab
+ id: (Optional[int | str]): An optional identifier for the tab, required if you wish to control the selected tab from a predict function.
+ """
super().__init__(**kwargs)
self.label = label
+ self.id = id
def get_config(self):
- return {"label": self.label, **super().get_config()}
+ return {"label": self.label, "id": self.id, **super().get_config()}
def select(self, fn: Callable, inputs: List[Component], outputs: List[Component]):
"""
| diff --git a/gradio/test_data/blocks_configs.py b/gradio/test_data/blocks_configs.py
--- a/gradio/test_data/blocks_configs.py
+++ b/gradio/test_data/blocks_configs.py
@@ -27,11 +27,22 @@
"style": {},
},
},
- {"id": 3, "type": "tabs", "props": {"visible": True, "style": {}}},
+ {
+ "id": 3,
+ "type": "tabs",
+ "props": {
+ "visible": True,
+ "style": {},
+ },
+ },
{
"id": 4,
"type": "tabitem",
- "props": {"label": "X-ray", "visible": True, "style": {}},
+ "props": {
+ "label": "X-ray",
+ "visible": True,
+ "style": {},
+ },
},
{
"id": 5,
@@ -71,7 +82,11 @@
{
"id": 9,
"type": "tabitem",
- "props": {"label": "CT Scan", "visible": True, "style": {}},
+ "props": {
+ "label": "CT Scan",
+ "visible": True,
+ "style": {},
+ },
},
{
"id": 10,
@@ -213,11 +228,22 @@
"style": {},
},
},
- {"id": 3, "type": "tabs", "props": {"visible": True, "style": {}}},
+ {
+ "id": 3,
+ "type": "tabs",
+ "props": {
+ "visible": True,
+ "style": {},
+ },
+ },
{
"id": 444,
"type": "tabitem",
- "props": {"label": "X-ray", "visible": True, "style": {}},
+ "props": {
+ "label": "X-ray",
+ "visible": True,
+ "style": {},
+ },
},
{
"id": 5,
@@ -262,7 +288,11 @@
{
"id": 9,
"type": "tabitem",
- "props": {"label": "CT Scan", "visible": True, "style": {}},
+ "props": {
+ "label": "CT Scan",
+ "visible": True,
+ "style": {},
+ },
},
{
"id": 10,
@@ -410,7 +440,10 @@
{
"id": 3,
"type": "tabs",
- "props": {"style": {}, "value": True},
+ "props": {
+ "style": {},
+ "value": True,
+ },
},
{
"id": 4,
| Allow currently selected `TabItem` to be set + updated
Currently there is no way to initialise or update the `Tabs` component to show a specific `TabItem`; it always defaults to the first.
We need to implement the following (see the sketch after the list below):
- Add `selected` kwarg to `Tabs`
- Add `id` kwargs to `TabItem`
- Add an `update` method to `Tabs` that accepts a `selected` kwarg.
- Use the provided `TabItem` `id` as the actual id in the frontend
- Use the provided `selected` `id` passed to `Tabs` as the selected tab.
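For reference, a minimal sketch (editorial; tab labels are illustrative) of the API added by the patch, following the `blocks_plug` demo in the diff: `TabItem`s get ids, `Tabs` takes `selected`, and `Tabs.update(selected=...)` switches tabs from a callback.

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Tabs(selected=1) as tabs:          # start on the tab with id=1
        with gr.TabItem("First", id=0):
            gr.Markdown("first tab")
        with gr.TabItem("Second", id=1):
            gr.Markdown("second tab")
    btn = gr.Button("Go to first tab")
    # Tabs.update(selected=...) changes the visible tab at runtime
    btn.click(fn=lambda: gr.Tabs.update(selected=0), inputs=None, outputs=tabs)

demo.launch()
```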
| 2022-07-05T12:15:12 |
|
gradio-app/gradio | 1,716 | gradio-app__gradio-1716 | [
"1430"
] | 2a67fe6ec99443fb340cf9b37d56dd3161bb50ff | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -16,7 +16,7 @@
import warnings
from copy import deepcopy
from types import ModuleType
-from typing import Any, Callable, Dict, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypedDict
import matplotlib.figure
import numpy as np
@@ -2389,7 +2389,11 @@ def style(
)
-@document()
+class DataframeData(TypedDict):
+ headers: List[str]
+ data: List[List[str | int | bool]]
+
+
class Dataframe(Changeable, IOComponent):
"""
Accepts or displays 2D input through a spreadsheet-like component for dataframes.
@@ -2521,26 +2525,22 @@ def update(
}
return IOComponent.add_interactive_to_config(updated_config, interactive)
- def preprocess(
- self, x: List[List[str | Number | bool]]
- ) -> pd.DataFrame | np.ndarray | List[List[str | float | bool]]:
+ def preprocess(self, x: DataframeData):
"""
Parameters:
- x: 2D array of str, numeric, or bool data
+ x (Dict[headers: List[str], data: List[List[str | int | bool]]]): 2D array of str, numeric, or bool data
Returns:
Dataframe in requested format
"""
if self.type == "pandas":
- if self.headers:
- return pd.DataFrame(x, columns=self.headers)
+ if x.get("headers") is not None:
+ return pd.DataFrame(x["data"], columns=x.get("headers"))
else:
- return pd.DataFrame(x)
- if self.col_count[0] == 1:
- x = [row[0] for row in x]
+ return pd.DataFrame(x["data"])
if self.type == "numpy":
- return np.array(x)
+ return np.array(x["data"])
elif self.type == "array":
- return x
+ return x["data"]
else:
raise ValueError(
"Unknown type: "
@@ -2587,8 +2587,6 @@ def postprocess(self, y: str | pd.DataFrame | np.ndarray | List[List[str | float
if isinstance(y, (np.ndarray, list)):
if isinstance(y, np.ndarray):
y = y.tolist()
- if len(y) == 0 or not isinstance(y[0], list):
- y = [y]
return {
"data": Dataframe.__process_markdown(y, self.datatype),
}
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -999,7 +999,10 @@ def test_component_functions(self):
"""
Preprocess, serialize, save_flagged, restore_flagged, generate_sample, get_config
"""
- x_data = [["Tim", 12, False], ["Jan", 24, True]]
+ x_data = {
+ "data": [["Tim", 12, False], ["Jan", 24, True]],
+ "headers": ["Name", "Age", "Member"],
+ }
dataframe_input = gr.Dataframe(headers=["Name", "Age", "Member"])
output = dataframe_input.preprocess(x_data)
self.assertEqual(output["Age"][1], 24)
@@ -1046,7 +1049,7 @@ def test_component_functions(self):
)
dataframe_input = gr.Dataframe()
output = dataframe_input.preprocess(x_data)
- self.assertEqual(output[1][1], 24)
+ self.assertEqual(output["Age"][1], 24)
with self.assertRaises(ValueError):
wrong_type = gr.Dataframe(type="unknown")
wrong_type.preprocess(x_data)
@@ -1120,13 +1123,13 @@ def test_in_interface_as_input(self):
"""
Interface, process,
"""
- x_data = [[1, 2, 3], [4, 5, 6]]
+ x_data = {"data": [[1, 2, 3], [4, 5, 6]]}
iface = gr.Interface(np.max, "numpy", "number")
self.assertEqual(iface.process([x_data]), [6])
- x_data = [["Tim"], ["Jon"], ["Sal"]]
+ x_data = {"data": [["Tim"], ["Jon"], ["Sal"]]}
def get_last(my_list):
- return my_list[-1]
+ return my_list[-1][-1]
iface = gr.Interface(get_last, "list", "text")
self.assertEqual(iface.process([x_data]), ["Sal"])
@@ -1140,7 +1143,9 @@ def check_odd(array):
return array % 2 == 0
iface = gr.Interface(check_odd, "numpy", "numpy")
- self.assertEqual(iface.process([[2, 3, 4]])[0], {"data": [[True, False, True]]})
+ self.assertEqual(
+ iface.process([{"data": [[2, 3, 4]]}])[0], {"data": [[True, False, True]]}
+ )
class TestVideo(unittest.TestCase):
@@ -1356,14 +1361,16 @@ def test_in_interface_as_output(self):
"""
timeseries_output = gr.Timeseries(x="time", y=["retail", "food", "other"])
iface = gr.Interface(lambda x: x, "dataframe", timeseries_output)
- df = pd.DataFrame(
- {
- "time": [1, 2, 3, 4],
- "retail": [1, 2, 3, 2],
- "food": [1, 2, 3, 2],
- "other": [1, 2, 4, 2],
- }
- )
+ df = {
+ "data": pd.DataFrame(
+ {
+ "time": [1, 2, 3, 4],
+ "retail": [1, 2, 3, 2],
+ "food": [1, 2, 3, 2],
+ "other": [1, 2, 4, 2],
+ }
+ )
+ }
self.assertEqual(
iface.process([df]),
[
@@ -1585,6 +1592,7 @@ def test_in_interface(self):
"""
def get_avg_age_per_gender(data):
+ print(data)
return {
"M": int(data[data["gender"] == "M"].mean()),
"F": int(data[data["gender"] == "F"].mean()),
@@ -1603,7 +1611,10 @@ def get_avg_age_per_gender(data):
["O", 20],
["F", 30],
]
- self.assertDictEqual(iface.process([y_data])[0], {"M": 35, "F": 25, "O": 20})
+ self.assertDictEqual(
+ iface.process([{"data": y_data, "headers": ["gender", "age"]}])[0],
+ {"M": 35, "F": 25, "O": 20},
+ )
class TestHTML(unittest.TestCase):
| Dynamic Dataset Size for Dataframe Component
Is there any way we can handle the dataframe dynamically in the background, without limiting users to uploading a fixed size?
Like when we create the Dataframe component, we have to give `headers` a list that must match the number of columns of the CSV/TSV file being dragged into the UI. This is not good for the reasons below:
- one thing in tabular workflows is that we have a lot of feature engineering going on; we modify datasets by adding one-hot encoded features or eliminating features according to SHAP values or importance, so it would be really essential to have such a feature.
- not many people know the number of columns of their dataset in a CSV file beforehand.
maybe ping @pngwn for this 🙂
thought: We can change the size through UI, so it should be fine for CSV/TSV file read to have.
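For reference, a minimal sketch (editorial; the payload values are illustrative) of the new `preprocess` contract from the patch above: the frontend payload now carries its own headers, so the column count no longer has to be fixed up front.

```python
import pandas as pd

# shape of the payload the Dataframe component now receives from the frontend
payload = {"headers": ["a", "b", "c"], "data": [[1, 2, 3], [4, 5, 6]]}

# mirrors Dataframe.preprocess with type="pandas" in the patch
df = pd.DataFrame(payload["data"], columns=payload.get("headers"))
print(df.shape)  # (2, 3)
```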
| Yesssss let's do this
I think this is a duplicate of #1198 | 2022-07-06T10:14:31 |
gradio-app/gradio | 1,754 | gradio-app__gradio-1754 | [
"1717"
] | 600722c2189d826b7666cabb7adc2766f5c01c6a | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -162,6 +162,11 @@ def __init__(
render: bool = True,
**kwargs,
):
+ """
+ Parameters:
+ visible: If False, this will be hidden but included in the Blocks config file (its visibility can later be updated).
+ render: If False, this will not be included in the Blocks config file at all.
+ """
self.children = []
super().__init__(visible=visible, render=render, **kwargs)
diff --git a/gradio/documentation.py b/gradio/documentation.py
--- a/gradio/documentation.py
+++ b/gradio/documentation.py
@@ -72,6 +72,8 @@ def document_fn(fn):
if type(default) == str:
default = '"' + default + '"'
parameter_doc["default"] = default
+ elif parameter_doc["doc"] is not None and "kwargs" in parameter_doc["doc"]:
+ parameter_doc["kwargs"] = True
parameter_docs.append(parameter_doc)
assert (
len(parameters) == 0
diff --git a/gradio/layouts.py b/gradio/layouts.py
--- a/gradio/layouts.py
+++ b/gradio/layouts.py
@@ -85,7 +85,9 @@ def __init__(
variant: str = "default",
):
"""
- variant: column type, 'default' (no background) or 'panel' (gray background color and rounded corners)
+ Parameters:
+ visible: If False, column will be hidden but included in the Blocks config file (its visibility can later be updated).
+ variant: column type, 'default' (no background) or 'panel' (gray background color and rounded corners)
"""
self.variant = variant
super().__init__(visible=visible)
| Miscellaneous formatting improvements to website after PR 1578
- [x] I have searched to see if a similar issue already exists.
In the review of #1578, we noticed some miscellaneous things we'd like to improve about the new website design:
- [x] No sidebars in the guides
- [x] **kwargs in Series/Parallel is shown as required when it is not

- [x] No example Number/Text (other input component) in the docs
### Old website

### Current website locally

- [x] No example output HighlightedText component in the docs
### Old Website

### Current website locally

- [x] Missing docstring descriptions for parameters of `Row`, `Column`

- [x] Missing description for `color_map` in `Chatbot`
### Old website

### Current website locally

- [x] The text on "Developing faster with reload mode" guide is not aligned

| Thanks for putting this together @freddyaboulton, will start checking them off today.
Just a note that all the embedded example components are currently missing on the docs (not just Textbox and HighlightedText). We removed them because we had hardcoded the config, and they wouldn't update if anything changed. I'm going to open a PR soon to launch and embed them the same way we do for demos.
That's awesome @aliabd! | 2022-07-11T15:41:44 |
|
gradio-app/gradio | 1,761 | gradio-app__gradio-1761 | [
"1737"
] | 9918a9540bcb93e6822dfad4185ac1f78657912c | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -284,6 +284,7 @@ class Textbox(Changeable, Submittable, IOComponent):
Creates a textarea for user to enter string input or display string output.
Preprocessing: passes textarea value as a {str} into the function.
Postprocessing: expects a {str} returned from function and sets textarea value to it.
+ Examples-format: a {str} representing the textbox input.
Demos: hello_world, diff_texts, sentence_builder
"""
@@ -480,6 +481,7 @@ class Number(Changeable, Submittable, IOComponent):
Creates a numeric field for user to enter numbers as input or display numeric output.
Preprocessing: passes field value as a {float} or {int} into the function, depending on `precision`.
Postprocessing: expects an {int} or {float} returned from the function and sets field value to it.
+ Examples-format: a {float} or {int} representing the number's value.
Demos: tax_calculator, titanic_survival, blocks_simple_squares
"""
@@ -664,6 +666,7 @@ class Slider(Changeable, IOComponent):
Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
Preprocessing: passes slider value as a {float} into the function.
Postprocessing: expects an {int} or {float} returned from function and sets slider value to it as long as it is within range.
+ Examples-format: A {float} or {int} representing the slider's value.
Demos: sentence_builder, generate_tone, titanic_survival
"""
@@ -826,6 +829,7 @@ class Checkbox(Changeable, IOComponent):
Preprocessing: passes the status of the checkbox as a {bool} into the function.
Postprocessing: expects a {bool} returned from the function and, if it is True, checks the checkbox.
+ Examples-format: a {bool} representing whether the box is checked.
Demos: sentence_builder, titanic_survival
"""
@@ -948,7 +952,7 @@ class CheckboxGroup(Changeable, IOComponent):
Creates a set of checkboxes of which a subset can be checked.
Preprocessing: passes the list of checked checkboxes as a {List[str]} or their indices as a {List[int]} into the function, depending on `type`.
Postprocessing: expects a {List[str]}, each element of which becomes a checked checkbox.
-
+ Examples-format: a {List[str]} representing the values to be checked.
Demos: sentence_builder, titanic_survival
"""
@@ -1119,6 +1123,7 @@ class Radio(Changeable, IOComponent):
Creates a set of radio buttons of which only one can be selected.
Preprocessing: passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`.
Postprocessing: expects a {str} corresponding to the value of the radio button to be selected.
+ Examples-format: a {str} representing the radio option to select.
Demos: sentence_builder, titanic_survival, blocks_essay
"""
@@ -1270,7 +1275,7 @@ class Dropdown(Radio):
Creates a dropdown of which only one entry can be selected.
Preprocessing: passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`.
Postprocessing: expects a {str} corresponding to the value of the dropdown entry to be selected.
-
+ Examples-format: a {str} representing the drop down value to select.
Demos: sentence_builder, titanic_survival
"""
@@ -1328,7 +1333,7 @@ class Image(Editable, Clearable, Changeable, Streamable, IOComponent):
Creates an image component that can be used to upload/draw images (as an input) or display images (as an output).
Preprocessing: passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch`. In the special case, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.
Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} filepath to an image and displays the image.
-
+ Examples-format: a {str} filepath to a local file that contains the image.
Demos: image_mod
"""
@@ -1698,7 +1703,7 @@ class Video(Changeable, Clearable, Playable, IOComponent):
Creates an video component that can be used to upload/record videos (as an input) or display videos (as an output).
Preprocessing: passes the uploaded video as a {str} filepath whose extension can be set by `format`.
Postprocessing: expects a {str} filepath to a video which is displayed.
-
+ Examples-format: a {str} filepath to a local file that contains the video.
Demos: video_identity
"""
@@ -1877,7 +1882,7 @@ class Audio(Changeable, Clearable, Playable, Streamable, IOComponent):
Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).
Preprocessing: passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath, depending on `type`
Postprocessing: expects a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath to an audio file, which gets displayed
-
+ Examples-format: a {str} filepath to a local file that contains audio.
Demos: main_note, generate_tone, reverse_audio
"""
@@ -2198,7 +2203,7 @@ class File(Changeable, Clearable, IOComponent):
Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output).
Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)
Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.
-
+ Examples-format: a {str} path to a local file that populates the component.
Demos: zip_to_json, zip_two_files
"""
@@ -2643,7 +2648,7 @@ class Timeseries(Changeable, IOComponent):
Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically.
Preprocessing: passes the uploaded timeseries data as a {pandas.DataFrame} into the function
Postprocessing: expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph
-
+ Examples-format: a {str} filepath of csv data with time series data.
Demos: fraud_detector
"""
@@ -2885,6 +2890,7 @@ class ColorPicker(Changeable, Submittable, IOComponent):
Creates a color picker for user to select a color as string input.
Preprocessing: passes selected color value as a {str} into the function.
Postprocessing: expects a {str} returned from function and sets color picker value to it.
+ Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red.
Demos: color_picker
"""
| Specify the format of example data for each component in the docs
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I wish the format of example data was properly documented in the Docs page.
In the getting started page for [example inputs](https://gradio.app/getting_started/#example-inputs), I see `The format of example data for each component is specified in the Docs` but I can't find that in the docs page.
**Describe the solution you'd like**
We should document the expected format of example data for each component.
We can add a docstring to the doc, or document the `preprocess_example` method for each component so that the docstring and code are never out of sync.
**Additional context**
Add any other context or screenshots about the feature request here.
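To make the requested format concrete, here is a minimal sketch (not taken from the issue itself) of how the documented examples-format would be used; the image path is a hypothetical local file:

```python
import gradio as gr

def flip(im):
    return im.rotate(180)

demo = gr.Interface(
    flip,
    gr.Image(type="pil"),
    "image",
    # Per the proposed "Examples-format" docs, an Image example is a str filepath
    examples=[["images/cheetah1.jpg"]],  # hypothetical local file
)

if __name__ == "__main__":
    demo.launch()
```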
| 2022-07-11T21:45:33 |
||
gradio-app/gradio | 1,768 | gradio-app__gradio-1768 | [
"1743"
] | a18c7ddf046546496b576c2d96496406f3fdc7d9 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2883,6 +2883,7 @@ def style(
)
+@document()
class ColorPicker(Changeable, Submittable, IOComponent):
"""
Creates a color picker for user to select a color as string input.
@@ -2905,12 +2906,12 @@ def __init__(
):
"""
Parameters:
- value (str): default text to provide in color picker.
- label (Optional[str]): component name in interface.
- show_label (bool): if True, will display label.
- interactive (Optional[bool]): if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible (bool): If False, component will be hidden.
- elem_id (Optional[str]): An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ value: default text to provide in color picker.
+ label: component name in interface.
+ show_label: if True, will display label.
+ interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
+ visible: If False, component will be hidden.
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
self.value = self.postprocess(value)
self.cleared_value = "#000000"
| ColorPicker component does not appear in documentation
### Describe the bug
The color picker component does not show up in the documentation.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Go to gradio.app/docs/
### Screenshot

### Logs
```shell
-
```
### System Info
```shell
-
```
### Severity
annoying
| 2022-07-12T15:58:47 |
||
gradio-app/gradio | 1,779 | gradio-app__gradio-1779 | [
"1766"
] | 8caec11d74612820844cb2b2d52bf7aadaa93796 | diff --git a/demo/color_picker/run.py b/demo/color_picker/run.py
--- a/demo/color_picker/run.py
+++ b/demo/color_picker/run.py
@@ -1,12 +1,11 @@
import gradio as gr
import numpy as np
+import os
from PIL import Image, ImageColor
-
def change_color(icon, color):
-
"""
Function that given an icon in .png format changes its color
Args:
@@ -19,24 +18,27 @@ def change_color(icon, color):
img = img.convert("RGBA")
image_np = np.array(icon)
_, _, _, alpha = image_np.T
- mask = (alpha > 0)
+ mask = alpha > 0
image_np[..., :-1][mask.T] = ImageColor.getcolor(color, "RGB")
edited_image = Image.fromarray(image_np)
return edited_image
inputs = [
- gr.Image(label="icon", type="pil", image_mode="RGBA"),
- gr.ColorPicker(label="color")
- ]
+ gr.Image(label="icon", type="pil", image_mode="RGBA"),
+ gr.ColorPicker(label="color"),
+]
outputs = gr.Image(label="colored icon")
demo = gr.Interface(
fn=change_color,
inputs=inputs,
outputs=outputs,
+ examples=[
+ [os.path.join(os.path.dirname(__file__), "lion.jpg"), "#ff0000"],
+ [os.path.join(os.path.dirname(__file__), "lion.jpg"), "#0000FF"],
+ ],
)
if __name__ == "__main__":
demo.launch()
-
| ColorPicker examples not displayed in examples dataset component
### Describe the bug
When running an app with ColorPicker examples, the example colors are not displayed in the examples dataset/dataframe.
The color is being correctly selected and displayed in the ColorPicker component though, which makes me think the backend logic is sound and it's a problem specific to the Dataset UI component.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Run `demo/color_picker/run.py` with the following modifications (setting red and blue as example colors)
```python
import gradio as gr
import numpy as np
from PIL import Image, ImageColor
def change_color(icon, color):
"""
Function that given an icon in .png format changes its color
Args:
icon: Icon whose color needs to be changed.
color: Chosen color with which to edit the input icon.
Returns:
edited_image: Edited icon.
"""
img = icon.convert("LA")
img = img.convert("RGBA")
image_np = np.array(icon)
_, _, _, alpha = image_np.T
mask = (alpha > 0)
image_np[..., :-1][mask.T] = ImageColor.getcolor(color, "RGB")
edited_image = Image.fromarray(image_np)
return edited_image
inputs = [
gr.Image(label="icon", type="pil", image_mode="RGBA"),
gr.ColorPicker(label="color")
]
outputs = gr.Image(label="colored icon")
demo = gr.Interface(
fn=change_color,
inputs=inputs,
outputs=outputs,
examples=[["/Users/freddy/sources/gradio/demo/blocks_inputs/lion.jpg", "#ff0000"],
["/Users/freddy/sources/gradio/demo/blocks_inputs/lion.jpg", "#0000FF"]]
)
if __name__ == "__main__":
demo.launch()
```
### Screenshot

### Logs
```shell
-
```
### System Info
```shell
On main
```
### Severity
annoying
| 2022-07-13T11:24:43 |
||
gradio-app/gradio | 1,785 | gradio-app__gradio-1785 | [
"1773"
] | de4458361b359e2333d8d265cb3c57b91bec513b | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2396,12 +2396,13 @@ class DataframeData(TypedDict):
data: List[List[str | int | bool]]
+@document()
class Dataframe(Changeable, IOComponent):
"""
Accepts or displays 2D input through a spreadsheet-like component for dataframes.
Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type`
Postprocessing: expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, or {str} path to a csv, which is rendered in the spreadsheet.
-
+ Examples-format: a {str} filepath to a csv with data.
Demos: filter_records, matrix_transpose, tax_calculator
"""
| Why is gr.dataframe not found in official documentation?
Is the component description missing from the official documentation?
Also, is there a component that supports arrays directly?
Also, I used Hugging Face to build an API interface for uploading Excel files with gr.File. How do I submit the body part?
The app I built is at: https://huggingface.co/spaces/changxin/pq
You can also look at: https://hf.space/embed/changxin/pq/+
The API address is: https://hf.space/embed/changxin/pq/+/api/df
Input Payload
{
"data": [
(List[Dict[name: str, data: str]]), // a list of JSON objects, each with the filename as its 'name' property and the base64-encoded contents as its 'data' property, for the 'Select the Excel File' File component that you want to read
]
}
How do I build the body part of this post api?
Thanks for filing @tt0203 ! Yep, `Dataframe` should be a part of the documentation; I'll make a note of this in #1673.
Regarding the body of the post api, I think something along these lines will work:
```python
import os
import requests
from gradio.processing_utils import encode_file_to_base64
data = encode_file_to_base64(filepath)
prediction = requests.post(url, json={"data": data, "name": filepath, "size": os.path.getsize(filepath)})
```
Can you explain your use case a bit more? Curious why you're trying to hit the api manually.
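As a rough follow-up sketch (untested, and the exact nesting is an assumption based only on the payload description quoted in the issue and the helper used in the snippet above), the request body for the `/api/df` endpoint might be assembled like this:

```python
import os

import requests
from gradio.processing_utils import encode_file_to_base64

url = "https://hf.space/embed/changxin/pq/+/api/df"
filepath = "data.xlsx"  # hypothetical local Excel file

# Following the quoted payload description: the single File input is a list of
# objects with the filename under "name" and base64 contents under "data".
file_obj = {"name": os.path.basename(filepath), "data": encode_file_to_base64(filepath)}
response = requests.post(url, json={"data": [[file_obj]]})
print(response.json())
```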
| 2022-07-13T17:46:13 |
|
gradio-app/gradio | 1,790 | gradio-app__gradio-1790 | [
"1778"
] | c323aac2747307c3ce3e3b6de55cfe5b166cf5d2 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1448,7 +1448,6 @@ def format_image(
if self.type == "file":
warnings.warn(
"The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.",
- DeprecationWarning,
)
return file_obj
else:
@@ -1996,7 +1995,6 @@ def preprocess(self, x: Dict[str, str] | None) -> Tuple[int, np.array] | str | N
if self.type == "file":
warnings.warn(
"The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.",
- DeprecationWarning,
)
return file_obj
elif self.type == "filepath":
@@ -2018,7 +2016,6 @@ def serialize(self, x, called_directly):
elif self.type == "file":
warnings.warn(
"The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.",
- DeprecationWarning,
)
name = x.name
elif self.type == "numpy":
@@ -3177,7 +3174,6 @@ def __init__(
if color_map is not None:
warnings.warn(
"The 'color_map' parameter has been moved from the constructor to `HighlightedText.style()` ",
- DeprecationWarning,
)
self.show_legend = show_legend
self.combine_adjacent = combine_adjacent
@@ -3546,7 +3542,6 @@ def __init__(
"""
warnings.warn(
"The Carousel component is partially deprecated. It may not behave as expected.",
- DeprecationWarning,
)
if not isinstance(components, list):
components = [components]
@@ -3659,7 +3654,6 @@ def __init__(
if color_map is not None:
warnings.warn(
"The 'color_map' parameter has been moved from the constructor to `Chatbot.style()` ",
- DeprecationWarning,
)
self.value = self.postprocess(value)
diff --git a/gradio/inputs.py b/gradio/inputs.py
--- a/gradio/inputs.py
+++ b/gradio/inputs.py
@@ -25,7 +25,6 @@ def __init__(
):
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
value=default,
@@ -58,7 +57,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(value=default, label=label, optional=optional)
@@ -89,7 +87,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
@@ -122,7 +119,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(value=default, label=label, optional=optional)
@@ -151,7 +147,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
value=default,
@@ -186,7 +181,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
choices=choices,
@@ -221,7 +215,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
choices=choices,
@@ -262,7 +255,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components",
- DeprecationWarning,
)
super().__init__(
shape=shape,
@@ -299,7 +291,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(format=type, source=source, label=label, optional=optional)
@@ -326,7 +317,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(source=source, type=type, label=label, optional=optional)
@@ -355,7 +345,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(
file_count=file_count,
@@ -398,7 +387,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(
value=default,
@@ -435,7 +423,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(x=x, y=y, label=label, optional=optional)
@@ -459,7 +446,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.inputs is deprecated, and will not be supported in the future, please import this component as gr.Variable from gradio.components",
- DeprecationWarning,
)
super().__init__(value=default, label=label)
@@ -482,6 +468,5 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label, optional=optional)
diff --git a/gradio/outputs.py b/gradio/outputs.py
--- a/gradio/outputs.py
+++ b/gradio/outputs.py
@@ -20,7 +20,6 @@ def __init__(
):
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label, type=type)
@@ -42,7 +41,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
if plot:
type = "plot"
@@ -63,7 +61,6 @@ def __init__(self, type: Optional[str] = None, label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(format=type, label=label)
@@ -82,7 +79,6 @@ def __init__(self, type: str = "auto", label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(type=type, label=label)
@@ -100,7 +96,6 @@ def __init__(self, label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label)
@@ -131,7 +126,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(
headers=headers,
@@ -160,7 +154,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(x=x, y=y, label=label)
@@ -178,7 +171,6 @@ def __init__(self, label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label)
@@ -203,7 +195,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(num_top_classes=num_top_classes, type=type, label=label)
@@ -247,7 +238,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(color_map=color_map, label=label, show_legend=show_legend)
@@ -265,7 +255,6 @@ def __init__(self, label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label)
@@ -301,7 +290,6 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(components=components, label=label)
@@ -319,7 +307,6 @@ def __init__(self, label: Optional[str] = None):
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(label=label)
@@ -342,6 +329,5 @@ def __init__(
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
- DeprecationWarning,
)
super().__init__(clear_color=clear_color, label=label)
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -9,6 +9,7 @@
import numpy as np
import pandas as pd
import PIL
+import pytest
import gradio as gr
from gradio import media_data
@@ -30,6 +31,14 @@ def test_component_functions(self):
assert isinstance(gr.components.component("text"), gr.templates.Text)
+def test_raise_warnings():
+ for c_type, component in zip(
+ ["inputs", "outputs"], [gr.inputs.Textbox, gr.outputs.Label]
+ ):
+ with pytest.warns(UserWarning, match=f"Usage of gradio.{c_type}"):
+ component()
+
+
class TestTextbox(unittest.TestCase):
def test_component_functions(self):
"""
@@ -803,7 +812,7 @@ def test_component_functions(self):
x_wav["is_example"] = True
x_wav["crop_min"], x_wav["crop_max"] = 1, 4
self.assertIsNotNone(audio_input.preprocess(x_wav))
- with self.assertWarns(DeprecationWarning):
+ with self.assertWarns(UserWarning):
audio_input = gr.Audio(type="file")
audio_input.preprocess(x_wav)
with open("test/test_files/audio_sample.wav") as f:
| Throw warning if using `gr.inputs.*` or `gr.outputs.*`
I think we used to have a deprecation warning, not sure where it went
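For context, an illustrative sketch (not code from the issue): Python's default warning filters hide `DeprecationWarning`s raised inside imported libraries, which is likely why users never saw the old warning, while a plain `UserWarning` is printed.

```python
import warnings

def use_old_inputs_module():
    # DeprecationWarning is filtered out by default when raised from an imported
    # module, so most library users never see it:
    warnings.warn("gradio.inputs is deprecated", DeprecationWarning)

    # The default category is UserWarning, which Python's default filters do
    # print (once per call site):
    warnings.warn("gradio.inputs is deprecated")
```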
| 2022-07-13T21:40:28 |
|
gradio-app/gradio | 1,796 | gradio-app__gradio-1796 | [
"1786"
] | 0dbc8bfcf35cd0f87fd30e00639d0390a65f456c | diff --git a/demo/chatbot_demo/run.py b/demo/chatbot_demo/run.py
--- a/demo/chatbot_demo/run.py
+++ b/demo/chatbot_demo/run.py
@@ -17,7 +17,7 @@ def chat(message, history):
return history, history
-chatbot = gr.Chatbot(color_map=("green", "pink")).style()
+chatbot = gr.Chatbot().style(color_map=("green", "pink"))
demo = gr.Interface(
chat,
["text", "state"],
diff --git a/demo/image_mod/run.py b/demo/image_mod/run.py
--- a/demo/image_mod/run.py
+++ b/demo/image_mod/run.py
@@ -6,7 +6,7 @@ def image_mod(image):
return image.rotate(45)
-demo = gr.Interface(image_mod, gr.inputs.Image(type="pil"), "image",
+demo = gr.Interface(image_mod, gr.Image(type="pil"), "image",
flagging_options=["blurry", "incorrect", "other"], examples=[
os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg"),
os.path.join(os.path.dirname(__file__), "images/lion.jpg"),
| Remove usage of deprecated inputs and outputs from docs and guides
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I don't want to see usage of the deprecated `gr.inputs` and `gr.outputs` in the examples and guides.
Here is an example from the "Hugging face integrations" guide

**Describe the solution you'd like**
Remove usage of deprecated `gr.inputs` and `gr.outputs` guides.
**Additional context**
Add any other context or screenshots about the feature request here.
| 2022-07-14T18:54:01 |
||
gradio-app/gradio | 1,799 | gradio-app__gradio-1799 | [
"1795"
] | 106934fe70e02a2c1c716c3336d684275909e6f3 | diff --git a/demo/english_translator/run.py b/demo/english_translator/run.py
new file mode 100644
--- /dev/null
+++ b/demo/english_translator/run.py
@@ -0,0 +1,25 @@
+import gradio as gr
+
+from transformers import pipeline
+
+pipe = pipeline("translation", model="t5-base")
+
+
+def translate(text):
+ return pipe(text)[0]["translation_text"]
+
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ english = gr.Textbox(label="English text")
+ translate_btn = gr.Button(value="Translate")
+ with gr.Column():
+ german = gr.Textbox(label="German Text")
+
+ translate_btn.click(translate, inputs=english, outputs=german)
+ examples = gr.Examples(examples=["I went to the supermarket yesterday.", "Helen is a good swimmer."],
+ inputs=[english])
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/demo/generate_english_german/run.py b/demo/generate_english_german/run.py
new file mode 100644
--- /dev/null
+++ b/demo/generate_english_german/run.py
@@ -0,0 +1,27 @@
+import gradio as gr
+
+from transformers import pipeline
+
+english_translator = gr.Blocks.load(name="spaces/freddyaboulton/english-translator")
+english_generator = pipeline("text-generation", model="distilgpt2")
+
+
+def generate_text(text):
+ english_text = english_generator(text)[0]["generated_text"]
+ german_text = english_translator(english_text)
+ return english_text, german_text
+
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ seed = gr.Text(label="Input Phrase")
+ with gr.Column():
+ english = gr.Text(label="Generated English Text")
+ german = gr.Text(label="Generated German Text")
+ btn = gr.Button("Generate")
+ btn.click(generate_text, inputs=[seed], outputs=[english, german])
+ gr.Examples(["My name is Clara and I am"], inputs=[seed])
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -230,7 +230,7 @@ def skip() -> dict:
return update()
-@document()
+@document("load")
class Blocks(BlockContext):
"""
Blocks is Gradio's low-level API that allows you to create more custom web
@@ -262,7 +262,7 @@ def update(name):
btn.click(fn=update, inputs=inp, outputs=out)
demo.launch()
- Demos: blocks_hello, blocks_flipper, blocks_speech_text_length
+ Demos: blocks_hello, blocks_flipper, blocks_speech_text_length, generate_english_german
"""
def __init__(
@@ -686,21 +686,19 @@ def load(
For reverse compatibility reasons, this is both a class method and an instance
method, the two of which, confusingly, do two completely different things.
- Class method: loads a demo from a Hugging Face Spaces repo and creates it locally
- Parameters:
- name (str): the name of the model (e.g. "gpt2"), can include the `src` as prefix (e.g. "models/gpt2")
- src (str | None): the source of the model: `models` or `spaces` (or empty if source is provided as a prefix in `name`)
- api_key (str | None): optional api key for use with Hugging Face Hub
- alias (str | None): optional string used as the name of the loaded model instead of the default name
- type (str): the type of the Blocks, either a standard `blocks` or `column`
- Returns: Blocks instance
-
- Instance method: adds an event for when the demo loads in the browser.
+
+ Class method: loads a demo from a Hugging Face Spaces repo and creates it locally and returns a block instance.
+
+
+ Instance method: adds an event for when the demo loads in the browser and returns None.
Parameters:
- fn: Callable function
- inputs: input list
- outputs: output list
- Returns: None
+ name: Class Method - the name of the model (e.g. "gpt2"), can include the `src` as prefix (e.g. "models/gpt2")
+ src: Class Method - the source of the model: `models` or `spaces` (or empty if source is provided as a prefix in `name`)
+ api_key: Class Method - optional api key for use with Hugging Face Hub
+ alias: Class Method - optional string used as the name of the loaded model instead of the default name
+ fn: Instance Method - Callable function
+ inputs: Instance Method - input list
+ outputs: Instance Method - output list
"""
if isinstance(self_or_cls, type):
if name is None:
| Add a guide on how to use blocks like regular python functions
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I wish there was a guide explaining how to use use a blocks app like a regular python function. Right now, it's only mentioned briefly for interfaces in the "Advanced Interfaces" [guide](https://gradio.app/advanced_interface_features/#loading-hugging-face-models-and-spaces) but there is no mention that this works for Blocks.
Some of our users are asking about this on discord

**Describe the solution you'd like**
A guide describing how to use blocks apps like regular python functions and how this relates to loading from hugging face spaces.
We should also add the `load` method to the documentation.
**Additional context**
Add any other context or screenshots about the feature request here.
| 2022-07-15T19:01:21 |
||
gradio-app/gradio | 1,817 | gradio-app__gradio-1817 | [
"1812"
] | 4731b1aaba77c2bb46541f6afe0ddf445949c194 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@
author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir",
author_email="[email protected]",
url="https://github.com/gradio-app/gradio",
- packages=["gradio", "gradio.test_data"],
+ packages=["gradio", "gradio.test_data", "test.test_files"],
license="Apache License 2.0",
keywords=["machine learning", "visualization", "reproducibility"],
install_requires=requirements,
| requirements.txt not bundled on pypi
### Describe the bug
The pypi tarball does not include `requirements.txt`. This has become a problem since #1669 was merged.
The fix is likely much like #1706.
... While we're at it: it doesn't seem like `test/test_files/*` are included either.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```shell
$ curl 'https://files.pythonhosted.org/packages/90/5f/cd78734cb3cc0ce4decafc07fc7d06e55870d75e1ab14bf96cde412dd87c/gradio-3.0.26.tar.gz' | tar ztvf - | grep -E '(requirements.txt|test/test_files)' || echo "not found!"
not found!
```
### Screenshot
_No response_
### Logs
```shell
FileNotFoundError in setup.py regarding requirements.txt
```
### System Info
```shell
gradio version: 3.0.26
nixos: 22.05
```
### Severity
annoying
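For readers unfamiliar with the failure mode, a general illustration (not gradio's actual `setup.py`) of why a file missing from the sdist breaks installation from the tarball:

```python
# General illustration of the failure mode, not gradio's actual setup.py
import pathlib

from setuptools import setup

# If requirements.txt is not shipped in the sdist (e.g. not listed in MANIFEST.in),
# this read raises FileNotFoundError during `pip install` from the tarball,
# matching the traceback in the logs above.
requirements = pathlib.Path("requirements.txt").read_text().splitlines()

setup(name="example-package", install_requires=requirements)
```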
| Thank you for filing @pbsds ! I can fix
BTW @pbsds we started automatically tagging all of our releases on github starting with 3.0.25.
Not sure what's needed for nixpkgs but I think you should be able to do
`pip install https://github.com/gradio-app/gradio/archive/refs/tags/v3.0.25.tar.gz`
Super cool that you're adding gradio to nixpkgs btw. I'll add the files you listed here to the source distribution but maybe you can use the github releases as well.
Also, I assume you need `test/requirements.txt` to install the test dependencies? | 2022-07-18T15:36:49 |
|
gradio-app/gradio | 1,818 | gradio-app__gradio-1818 | [
"1807"
] | 175bba2117568573f47d02c49b8935a9a4993e02 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -12,17 +12,19 @@
import os
import pathlib
import shutil
-import sys
import tempfile
import warnings
from copy import deepcopy
from types import ModuleType
-from typing import Any, Callable, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
-if sys.version_info[0] == 3 and sys.version_info[1] >= 8:
+if TYPE_CHECKING:
from typing import TypedDict
-else:
- from typing_extensions import TypedDict
+
+ class DataframeData(TypedDict):
+ headers: List[str]
+ data: List[List[str | int | bool]]
+
import matplotlib.figure
import numpy as np
@@ -2400,11 +2402,6 @@ def style(
)
-class DataframeData(TypedDict):
- headers: List[str]
- data: List[List[str | int | bool]]
-
-
@document()
class Dataframe(Changeable, IOComponent):
"""
| diff --git a/scripts/create_test_requirements.sh b/scripts/create_test_requirements.sh
--- a/scripts/create_test_requirements.sh
+++ b/scripts/create_test_requirements.sh
@@ -3,8 +3,8 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
echo "Please run the script from repo directory"
exit -1
else
- echo "Creating requirements under test/requirements.txt using requirements.in. Please run this script from unix or wsl!"
+ echo "Creating requirements under test/requirements.txt using requirements.in. Please run this script from unix or wsl in a python3.7 env!"
cd test
pip install --upgrade pip-tools
- pip-compile
+ pip-compile --output-file requirements.txt
fi
diff --git a/test/requirements.in b/test/requirements.in
--- a/test/requirements.in
+++ b/test/requirements.in
@@ -17,4 +17,5 @@ black
isort
flake8
httpx
-pydantic
\ No newline at end of file
+pydantic
+respx
\ No newline at end of file
diff --git a/test/requirements.txt b/test/requirements.txt
--- a/test/requirements.txt
+++ b/test/requirements.txt
@@ -1,76 +1,52 @@
#
-# This file is autogenerated by pip-compile with python 3.9
+# This file is autogenerated by pip-compile with python 3.7
# To update, run:
#
-# pip-compile
+# pip-compile --output-file=requirements.txt
#
-absl-py==1.0.0
- # via
- # tensorboard
- # tensorflow
-alembic==1.7.6
+alembic==1.8.1
# via mlflow
-asttokens==2.0.5
- # via stack-data
-astunparse==1.6.3
- # via tensorflow
+anyio==3.6.1
+ # via httpcore
asyncio==3.4.3
# via -r requirements.in
-atomicwrites==1.4.1
- # via pytest
attrs==21.4.0
# via
# jsonschema
# pytest
backcall==0.2.0
# via ipython
-black==22.1.0
- # via
- # -r requirements.in
- # ipython
-cachetools==5.0.0
- # via google-auth
-certifi==2021.10.8
+black==22.6.0
+ # via -r requirements.in
+certifi==2022.6.15
# via
# dulwich
+ # httpcore
+ # httpx
# requests
# sentry-sdk
- # urllib3
-cffi==1.15.0
- # via cryptography
-charset-normalizer==2.0.11
+charset-normalizer==2.1.0
# via requests
-click==8.0.3
+click==8.1.3
# via
# black
# databricks-cli
# flask
# mlflow
- # sacremoses
# wandb
-cloudpickle==2.0.0
+cloudpickle==2.1.0
# via
# mlflow
# shap
-colorama==0.4.4
- # via
- # click
- # ipython
- # pytest
- # tqdm
-comet-ml==3.25.0
+comet-ml==3.31.6
# via -r requirements.in
configobj==5.0.6
# via everett
-coverage[toml]==6.3.1
+coverage[toml]==6.4.2
# via
# -r requirements.in
# pytest-cov
-cryptography==36.0.1
- # via
- # pyopenssl
- # urllib3
-databricks-cli==0.16.4
+databricks-cli==0.17.0
# via mlflow
decorator==5.1.1
# via ipython
@@ -78,95 +54,90 @@ docker==5.0.3
# via mlflow
docker-pycreds==0.4.0
# via wandb
-dulwich==0.20.32
+dulwich==0.20.45
# via comet-ml
entrypoints==0.4
# via mlflow
everett[ini]==3.0.0
# via comet-ml
-executing==0.8.2
- # via stack-data
-filelock==3.4.2
+filelock==3.7.1
# via
# huggingface-hub
# transformers
flake8==4.0.1
# via -r requirements.in
-flask==2.0.2
+flask==2.1.3
# via
# mlflow
# prometheus-flask-exporter
-flatbuffers==2.0
- # via tensorflow
-gast==0.5.3
- # via tensorflow
gitdb==4.0.9
# via gitpython
-gitpython==3.1.26
+gitpython==3.1.27
# via
# mlflow
# wandb
-google-auth==2.6.0
- # via
- # google-auth-oauthlib
- # tensorboard
-google-auth-oauthlib==0.4.6
- # via tensorboard
-google-pasta==0.2.0
- # via tensorflow
greenlet==1.1.2
# via sqlalchemy
-grpcio==1.43.0
+gunicorn==20.1.0
+ # via mlflow
+h11==0.12.0
+ # via httpcore
+httpcore==0.15.0
+ # via httpx
+httpx==0.23.0
# via
- # tensorboard
- # tensorflow
-h5py==3.6.0
- # via tensorflow
-huggingface-hub==0.4.0
+ # -r requirements.in
+ # respx
+huggingface-hub==0.8.1
# via
# -r requirements.in
# transformers
idna==3.3
# via
+ # anyio
# requests
- # urllib3
-imageio==2.14.1
+ # rfc3986
+imageio==2.19.5
# via scikit-image
-importlib-metadata==4.10.1
+importlib-metadata==4.2.0
# via
- # markdown
+ # alembic
+ # click
+ # flake8
+ # flask
+ # huggingface-hub
+ # jsonschema
+ # mako
# mlflow
+ # pluggy
+ # pytest
+ # sqlalchemy
+ # transformers
+importlib-resources==5.8.0
+ # via
+ # alembic
+ # jsonschema
iniconfig==1.1.1
# via pytest
-ipython==8.0.1
+ipython==7.34.0
# via -r requirements.in
isort==5.10.1
# via -r requirements.in
-itsdangerous==2.0.1
+itsdangerous==2.1.2
# via flask
jedi==0.18.1
# via ipython
-jinja2==3.0.3
+jinja2==3.1.2
# via flask
joblib==1.1.0
- # via
- # sacremoses
- # scikit-learn
-jsonschema==4.4.0
+ # via scikit-learn
+jsonschema==4.7.2
# via comet-ml
-keras==2.8.0
- # via tensorflow
-keras-preprocessing==1.1.2
- # via tensorflow
-libclang==13.0.0
- # via tensorflow
-llvmlite==0.38.0
+llvmlite==0.38.1
# via numba
-mako==1.1.6
+mako==1.2.1
# via alembic
-markdown==3.3.6
- # via tensorboard
-markupsafe==2.0.1
+markupsafe==2.1.1
# via
# jinja2
# mako
@@ -174,38 +145,31 @@ matplotlib-inline==0.1.3
# via ipython
mccabe==0.6.1
# via flake8
-mlflow==1.23.1
+mlflow==1.27.0
# via -r requirements.in
mypy-extensions==0.4.3
# via black
networkx==2.6.3
# via scikit-image
-numba==0.55.1
+numba==0.55.2
# via shap
-numpy==1.21.5
+numpy==1.21.6
# via
- # h5py
# imageio
- # keras-preprocessing
# mlflow
# numba
- # opt-einsum
# pandas
# pywavelets
# scikit-image
# scikit-learn
# scipy
# shap
- # tensorboard
- # tensorflow
# tifffile
# transformers
nvidia-ml-py3==7.352.0
# via comet-ml
oauthlib==3.2.0
- # via requests-oauthlib
-opt-einsum==3.3.0
- # via tensorflow
+ # via databricks-cli
packaging==21.3
# via
# huggingface-hub
@@ -214,7 +178,7 @@ packaging==21.3
# scikit-image
# shap
# transformers
-pandas==1.4.0
+pandas==1.3.5
# via
# mlflow
# shap
@@ -224,74 +188,66 @@ pathspec==0.9.0
# via black
pathtools==0.1.2
# via wandb
+pexpect==4.8.0
+ # via ipython
pickleshare==0.7.5
# via ipython
-pillow==9.0.1
+pillow==9.2.0
# via
# imageio
# scikit-image
-platformdirs==2.4.1
+platformdirs==2.5.2
# via black
pluggy==1.0.0
# via pytest
-prometheus-client==0.13.1
+prometheus-client==0.14.1
# via prometheus-flask-exporter
-prometheus-flask-exporter==0.18.7
+prometheus-flask-exporter==0.20.2
# via mlflow
promise==2.3
# via wandb
-prompt-toolkit==3.0.26
+prompt-toolkit==3.0.30
# via ipython
-protobuf==3.19.4
+protobuf==3.20.1
# via
# mlflow
- # tensorboard
- # tensorflow
# wandb
-psutil==5.9.0
+psutil==5.9.1
# via wandb
-pure-eval==0.2.2
- # via stack-data
+ptyprocess==0.7.0
+ # via pexpect
py==1.11.0
# via pytest
-pyasn1==0.4.8
- # via
- # pyasn1-modules
- # rsa
-pyasn1-modules==0.2.8
- # via google-auth
pycodestyle==2.8.0
# via flake8
-pycparser==2.21
- # via cffi
+pydantic==1.9.1
+ # via -r requirements.in
pyflakes==2.4.0
# via flake8
-pygments==2.11.2
+pygments==2.12.0
# via ipython
-pyopenssl==22.0.0
- # via urllib3
-pyparsing==3.0.7
+pyjwt==2.4.0
+ # via databricks-cli
+pyparsing==3.0.9
# via packaging
pyrsistent==0.18.1
# via jsonschema
-pytest==7.0.0
+pytest==7.1.2
# via
# -r requirements.in
# pytest-asyncio
# pytest-cov
-pytest-asyncio==0.18.3
+pytest-asyncio==0.19.0
# via -r requirements.in
pytest-cov==3.0.0
# via -r requirements.in
python-dateutil==2.8.2
- # via
- # pandas
- # wandb
-pytz==2021.3
+ # via pandas
+pytz==2022.1
# via
# mlflow
# pandas
-pywavelets==1.2.0
+pywavelets==1.3.0
# via scikit-image
pyyaml==6.0
# via
@@ -301,164 +257,133 @@ pyyaml==6.0
# wandb
querystring-parser==1.2.4
# via mlflow
-regex==2022.1.18
- # via
- # sacremoses
- # transformers
-requests==2.27.1
+regex==2022.7.9
+ # via transformers
+requests==2.28.1
# via
# comet-ml
# databricks-cli
# docker
# huggingface-hub
# mlflow
- # requests-oauthlib
# requests-toolbelt
- # tensorboard
# transformers
# wandb
-requests-oauthlib==1.3.1
- # via google-auth-oauthlib
requests-toolbelt==0.9.1
# via comet-ml
-rsa==4.8
- # via google-auth
-sacremoses==0.0.47
- # via transformers
-scikit-image==0.19.1
+respx==0.19.2
+ # via -r requirements.in
+rfc3986[idna2008]==1.5.0
+ # via httpx
+scikit-image==0.19.3
# via -r requirements.in
scikit-learn==1.0.2
# via shap
-scipy==1.8.0
+scipy==1.7.3
# via
# mlflow
# scikit-image
# scikit-learn
# shap
-selenium==4.0.0a6.post2
- # via -r requirements.in
-semantic-version==2.9.0
+semantic-version==2.10.0
# via comet-ml
-sentry-sdk==1.5.4
+sentry-sdk==1.7.2
+ # via
+ # comet-ml
+ # wandb
+setproctitle==1.2.3
# via wandb
-shap==0.40.0
+shap==0.41.0
# via -r requirements.in
-shortuuid==1.0.8
+shortuuid==1.0.9
# via wandb
six==1.16.0
# via
- # absl-py
- # asttokens
- # astunparse
# comet-ml
# configobj
# databricks-cli
# docker-pycreds
- # google-auth
- # google-pasta
- # grpcio
- # keras-preprocessing
# promise
# python-dateutil
# querystring-parser
- # sacremoses
- # tensorflow
# wandb
slicer==0.0.7
# via shap
smmap==5.0.0
# via gitdb
-sqlalchemy==1.4.31
+sniffio==1.2.0
+ # via
+ # anyio
+ # httpcore
+ # httpx
+sqlalchemy==1.4.39
# via
# alembic
# mlflow
sqlparse==0.4.2
# via mlflow
-stack-data==0.1.4
- # via ipython
-tabulate==0.8.9
+tabulate==0.8.10
# via databricks-cli
-tensorboard==2.8.0
- # via tensorflow
-tensorboard-data-server==0.6.1
- # via tensorboard
-tensorboard-plugin-wit==1.8.1
- # via tensorboard
-tensorflow==2.8.0
- # via -r requirements.in
-tensorflow-io-gcs-filesystem==0.24.0
- # via tensorflow
-termcolor==1.1.0
- # via
- # tensorflow
- # yaspin
-tf-estimator-nightly==2.8.0.dev2021122109
- # via tensorflow
threadpoolctl==3.1.0
# via scikit-learn
-tifffile==2022.2.2
+tifffile==2021.11.2
# via scikit-image
-tokenizers==0.11.4
+tokenizers==0.12.1
# via transformers
-tomli==2.0.0
+tomli==2.0.1
# via
# black
# coverage
# pytest
-torch==1.10.2
+torch==1.12.0
# via -r requirements.in
-tqdm==4.62.3
+tqdm==4.64.0
# via
# huggingface-hub
- # sacremoses
# shap
# transformers
-traitlets==5.1.1
+traitlets==5.3.0
# via
# ipython
# matplotlib-inline
-transformers==4.16.2
+transformers==4.20.1
# via -r requirements.in
-typing-extensions==4.0.1
+typed-ast==1.5.4
+ # via black
+typing-extensions==4.3.0
# via
+ # anyio
# black
+ # gitpython
# huggingface-hub
- # tensorflow
+ # importlib-metadata
+ # jsonschema
+ # pydantic
+ # pytest-asyncio
# torch
-urllib3[secure]==1.26.8
+urllib3==1.26.10
# via
# dulwich
# requests
- # selenium
# sentry-sdk
-waitress==2.1.1
- # via mlflow
-wandb==0.12.10
+wandb==0.12.21
# via -r requirements.in
wcwidth==0.2.5
# via prompt-toolkit
-websocket-client==1.2.3
+websocket-client==1.3.3
# via
# comet-ml
# docker
-werkzeug==2.0.2
- # via
- # flask
- # tensorboard
-wheel==0.37.1
- # via
- # astunparse
- # tensorboard
-wrapt==1.13.3
- # via
- # comet-ml
- # tensorflow
+werkzeug==2.1.2
+ # via flask
+wrapt==1.14.1
+ # via comet-ml
wurlitzer==3.0.2
# via comet-ml
-yaspin==2.1.0
- # via wandb
-zipp==3.7.0
- # via importlib-metadata
+zipp==3.8.1
+ # via
+ # importlib-metadata
+ # importlib-resources
# The following packages are considered to be unsafe in a requirements file:
# setuptools
diff --git a/test/test_documentation.py b/test/test_documentation.py
--- a/test/test_documentation.py
+++ b/test/test_documentation.py
@@ -1,9 +1,16 @@
+import sys
import unittest
+import pytest
+
import gradio as gr
-class TestDocumentatino(unittest.TestCase):
+class TestDocumentation(unittest.TestCase):
+ @pytest.mark.skipif(
+ sys.version_info < (3, 8),
+ reason="Docs use features in inspect module not available in py 3.7",
+ )
def test_documentation(self):
documentation = gr.documentation.generate_documentation()
assert len(documentation) > 0
diff --git a/test/test_utils.py b/test/test_utils.py
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -3,13 +3,13 @@
import unittest
import unittest.mock as mock
import warnings
-from typing import Literal
import pytest
import pytest_asyncio
import requests
-from httpx import AsyncClient
+from httpx import AsyncClient, Response
from pydantic import BaseModel
+from typing_extensions import Literal
from gradio.test_data.blocks_configs import (
XRAY_CONFIG,
@@ -259,143 +259,155 @@ async def client():
def make_mock_response(return_value):
- response = mock.MagicMock(name="mock_response")
- response.status_code = 201
- response.json.return_value = return_value
- return response
+ return Response(201, json=return_value)
[email protected]("gradio.utils.client.send")
-class TestRequest:
- @pytest.mark.asyncio
- async def test_get(self, mock_send):
+MOCK_REQUEST_URL = "https://very_real_url.com"
- mock_send.return_value = make_mock_response({"Host": "headers.jsontest.com"})
- client_response: Request = await Request(
- method=Request.Method.GET,
- url="very_real_url.com",
- )
- validated_data = client_response.get_validated_data()
- assert client_response.is_valid() is True
- assert validated_data["Host"] == "headers.jsontest.com"
[email protected]
+async def test_get(respx_mock):
+ respx_mock.get(MOCK_REQUEST_URL).mock(
+ make_mock_response({"Host": "headers.jsontest.com"})
+ )
- @pytest.mark.asyncio
- async def test_post(self, mock_send):
+ client_response: Request = await Request(
+ method=Request.Method.GET,
+ url=MOCK_REQUEST_URL,
+ )
+ validated_data = client_response.get_validated_data()
+ assert client_response.is_valid() is True
+ assert validated_data["Host"] == "headers.jsontest.com"
- payload = {"name": "morpheus", "job": "leader"}
- mock_send.return_value = make_mock_response(payload)
- client_response: Request = await Request(
- method=Request.Method.POST,
- url="very_real_url.com",
- json=payload,
- )
- validated_data = client_response.get_validated_data()
- assert client_response.status == 201
- assert validated_data["job"] == "leader"
- assert validated_data["name"] == "morpheus"
-
- @pytest.mark.asyncio
- async def test_validate_with_model(self, mock_send):
- mock_send.return_value = make_mock_response(
- {
- "name": "morpheus",
- "id": "1",
- "job": "leader",
- "createdAt": "2",
- }
- )
[email protected]
+async def test_post(respx_mock):
- class TestModel(BaseModel):
- name: str
- job: str
- id: str
- createdAt: str
-
- client_response: Request = await Request(
- method=Request.Method.POST,
- url="very_real_url.com",
- json={"name": "morpheus", "job": "leader"},
- validation_model=TestModel,
- )
- assert isinstance(client_response.get_validated_data(), TestModel)
-
- @pytest.mark.asyncio
- async def test_validate_and_fail_with_model(self, mock_send):
- class TestModel(BaseModel):
- name: Literal[str] = "John"
- job: str
-
- payload = {"name": "morpheus", "job": "leader"}
- mock_send.return_value = make_mock_response(payload)
-
- client_response: Request = await Request(
- method=Request.Method.POST,
- url="very_real_url.com",
- json=payload,
- validation_model=TestModel,
- )
- with pytest.raises(Exception):
- client_response.is_valid(raise_exceptions=True)
- assert client_response.has_exception is True
- assert isinstance(client_response.exception, Exception)
-
- @mock.patch("gradio.utils.Request._validate_response_data")
- @pytest.mark.asyncio
- async def test_exception_type(self, validate_response_data, mock_send):
- class ResponseValidationException(Exception):
- message = "Response object is not valid."
-
- validate_response_data.side_effect = Exception()
-
- client_response: Request = await Request(
- method=Request.Method.GET,
- url="very_real_url.com",
- exception_type=ResponseValidationException,
- )
- assert isinstance(client_response.exception, ResponseValidationException)
+ payload = {"name": "morpheus", "job": "leader"}
+ respx_mock.post(MOCK_REQUEST_URL).mock(make_mock_response(payload))
- @pytest.mark.asyncio
- async def test_validate_with_function(self, mock_send):
- mock_send.return_value = make_mock_response({"name": "morpheus", "id": 1})
+ client_response: Request = await Request(
+ method=Request.Method.POST,
+ url=MOCK_REQUEST_URL,
+ json=payload,
+ )
+ validated_data = client_response.get_validated_data()
+ assert client_response.status == 201
+ assert validated_data["job"] == "leader"
+ assert validated_data["name"] == "morpheus"
- def has_name(response):
- if response["name"] is not None:
- return response
- raise Exception
- client_response: Request = await Request(
- method=Request.Method.POST,
- url="very_real_url.com",
- json={"name": "morpheus", "job": "leader"},
- validation_function=has_name,
- )
- validated_data = client_response.get_validated_data()
- assert client_response.is_valid() is True
- assert validated_data["id"] is not None
- assert client_response.exception is None
-
- @pytest.mark.asyncio
- async def test_validate_and_fail_with_function(self, mock_send):
- def has_name(response):
- if response["name"] is not None:
- if response["name"] == "Alex":
- return response
- raise Exception
-
- mock_send.return_value = make_mock_response({"name": "morpheus"})
-
- client_response: Request = await Request(
- method=Request.Method.POST,
- url="very_real_url.com",
- json={"name": "morpheus", "job": "leader"},
- validation_function=has_name,
- )
- assert client_response.is_valid() is False
- with pytest.raises(Exception):
- client_response.is_valid(raise_exceptions=True)
- assert client_response.exception is not None
[email protected]
+async def test_validate_with_model(respx_mock):
+
+ response = make_mock_response(
+ {
+ "name": "morpheus",
+ "id": "1",
+ "job": "leader",
+ "createdAt": "2",
+ }
+ )
+ respx_mock.post(MOCK_REQUEST_URL).mock(response)
+
+ class TestModel(BaseModel):
+ name: str
+ job: str
+ id: str
+ createdAt: str
+
+ client_response: Request = await Request(
+ method=Request.Method.POST,
+ url=MOCK_REQUEST_URL,
+ json={"name": "morpheus", "job": "leader"},
+ validation_model=TestModel,
+ )
+ assert isinstance(client_response.get_validated_data(), TestModel)
+
+
[email protected]
+async def test_validate_and_fail_with_model(respx_mock):
+ class TestModel(BaseModel):
+ name: Literal[str] = "John"
+ job: str
+
+ payload = {"name": "morpheus", "job": "leader"}
+ respx_mock.post(MOCK_REQUEST_URL).mock(make_mock_response(payload))
+
+ client_response: Request = await Request(
+ method=Request.Method.POST,
+ url=MOCK_REQUEST_URL,
+ json=payload,
+ validation_model=TestModel,
+ )
+ with pytest.raises(Exception):
+ client_response.is_valid(raise_exceptions=True)
+ assert client_response.has_exception is True
+ assert isinstance(client_response.exception, Exception)
+
+
[email protected]("gradio.utils.Request._validate_response_data")
[email protected]
+async def test_exception_type(validate_response_data, respx_mock):
+ class ResponseValidationException(Exception):
+ message = "Response object is not valid."
+
+ validate_response_data.side_effect = Exception()
+
+ respx_mock.get(MOCK_REQUEST_URL).mock(Response(201))
+
+ client_response: Request = await Request(
+ method=Request.Method.GET,
+ url=MOCK_REQUEST_URL,
+ exception_type=ResponseValidationException,
+ )
+ assert isinstance(client_response.exception, ResponseValidationException)
+
+
[email protected]
+async def test_validate_with_function(respx_mock):
+
+ respx_mock.post(MOCK_REQUEST_URL).mock(
+ make_mock_response({"name": "morpheus", "id": 1})
+ )
+
+ def has_name(response):
+ if response["name"] is not None:
+ return response
+ raise Exception
+
+ client_response: Request = await Request(
+ method=Request.Method.POST,
+ url=MOCK_REQUEST_URL,
+ json={"name": "morpheus", "job": "leader"},
+ validation_function=has_name,
+ )
+ validated_data = client_response.get_validated_data()
+ assert client_response.is_valid() is True
+ assert validated_data["id"] is not None
+ assert client_response.exception is None
+
+
[email protected]
+async def test_validate_and_fail_with_function(respx_mock):
+ def has_name(response):
+ if response["name"] is not None:
+ if response["name"] == "Alex":
+ return response
+ raise Exception
+
+ respx_mock.post(MOCK_REQUEST_URL).mock(make_mock_response({"name": "morpheus"}))
+
+ client_response: Request = await Request(
+ method=Request.Method.POST,
+ url=MOCK_REQUEST_URL,
+ json={"name": "morpheus", "job": "leader"},
+ validation_function=has_name,
+ )
+ assert client_response.is_valid() is False
+ with pytest.raises(Exception):
+ client_response.is_valid(raise_exceptions=True)
+ assert client_response.exception is not None
if __name__ == "__main__":
| Remove `typing-extensions` dependency
In #1805, we introduced an additional dependency in the `gradio` package: `typing-extensions` in order for `gradio` to work properly with Python 3.7.
However, this dependency is not strictly needed as we can wrap the relevant type checking code with a TYPE_CHECKING block so it does not get executed at runtime, as pointed out by @freddyaboulton.
It would be good to remove this dependency so we lighten the package.
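A minimal sketch of the `TYPE_CHECKING` pattern being proposed (mirroring the accompanying patch):

```python
from __future__ import annotations

from typing import TYPE_CHECKING, List

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime, so Python 3.7
    # runs fine even though typing.TypedDict only exists on Python >= 3.8.
    from typing import TypedDict

    class DataframeData(TypedDict):
        headers: List[str]
        data: List[List[str | int | bool]]
```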
| 2022-07-18T15:53:17 |
|
gradio-app/gradio | 1,834 | gradio-app__gradio-1834 | [
"1673"
] | 3f9ec2c34578ba4b898518812e11069dfe91b50f | diff --git a/gradio/flagging.py b/gradio/flagging.py
--- a/gradio/flagging.py
+++ b/gradio/flagging.py
@@ -10,10 +10,13 @@
import gradio as gr
from gradio import encryptor, utils
+from gradio.documentation import document, set_documentation_group
if TYPE_CHECKING:
from gradio.components import Component
+set_documentation_group("flagging")
+
class FlaggingCallback(ABC):
"""
@@ -54,12 +57,23 @@ def flag(
pass
+@document()
class SimpleCSVLogger(FlaggingCallback):
"""
- A simple example implementation of the FlaggingCallback abstract class
- provided for illustrative purposes.
+ A simplified implementation of the FlaggingCallback abstract class
+ provided for illustrative purposes. Each flagged sample (both the input and output data)
+ is logged to a CSV file on the machine running the gradio app.
+ Example:
+ import gradio as gr
+ def image_classifier(inp):
+ return {'cat': 0.3, 'dog': 0.7}
+ demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
+ flagging_callback=SimpleCSVLogger())
"""
+ def __init__(self):
+ pass
+
def setup(self, components: List[Component], flagging_dir: str):
self.components = components
self.flagging_dir = flagging_dir
@@ -95,12 +109,22 @@ def flag(
return line_count
+@document()
class CSVLogger(FlaggingCallback):
"""
- The default implementation of the FlaggingCallback abstract class.
- Logs the input and output data to a CSV file. Supports encryption.
+ The default implementation of the FlaggingCallback abstract class. Each flagged
+ sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app.
+ Example:
+ import gradio as gr
+ def image_classifier(inp):
+ return {'cat': 0.3, 'dog': 0.7}
+ demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
+ flagging_callback=CSVLogger())
"""
+ def __init__(self):
+ pass
+
def setup(
self,
components: List[Component],
@@ -203,37 +227,38 @@ def replace_flag_at_index(file_content):
return line_count
+@document()
class HuggingFaceDatasetSaver(FlaggingCallback):
"""
- A FlaggingCallback that saves flagged data to a HuggingFace dataset.
+ A callback that saves each flagged sample (both the input and output data)
+ to a HuggingFace dataset.
+ Example:
+ import gradio as gr
+ hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes")
+ def image_classifier(inp):
+ return {'cat': 0.3, 'dog': 0.7}
+ demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label",
+ allow_flagging="manual", flagging_callback=hf_writer)
"""
def __init__(
self,
- hf_foken: str,
+ hf_token: str,
dataset_name: str,
organization: Optional[str] = None,
private: bool = False,
- verbose: bool = True,
):
"""
- Params:
- hf_token (str): The token to use to access the huggingface API.
- dataset_name (str): The name of the dataset to save the data to, e.g.
- "image-classifier-1"
- organization (str): The name of the organization to which to attach
- the datasets. If None, the dataset attaches to the user only.
- private (bool): If the dataset does not already exist, whether it
- should be created as a private dataset or public. Private datasets
- may require paid huggingface.co accounts
- verbose (bool): Whether to print out the status of the dataset
- creation.
+ Parameters:
+ hf_token: The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset.
+ dataset_name: The name of the dataset to save the data to, e.g. "image-classifier-1"
+ organization: The organization to save the dataset under. The hf_token must provide write access to this organization. If not provided, saved under the name of the user corresponding to the hf_token.
+ private: Whether the dataset should be private (defaults to False).
"""
- self.hf_foken = hf_foken
+ self.hf_token = hf_token
self.dataset_name = dataset_name
self.organization_name = organization
self.dataset_private = private
- self.verbose = verbose
def setup(self, components: List[Component], flagging_dir: str):
"""
@@ -250,7 +275,7 @@ def setup(self, components: List[Component], flagging_dir: str):
)
path_to_dataset_repo = huggingface_hub.create_repo(
name=self.dataset_name,
- token=self.hf_foken,
+ token=self.hf_token,
private=self.dataset_private,
repo_type="dataset",
exist_ok=True,
@@ -262,7 +287,7 @@ def setup(self, components: List[Component], flagging_dir: str):
self.repo = huggingface_hub.Repository(
local_dir=self.dataset_dir,
clone_from=path_to_dataset_repo,
- use_auth_token=self.hf_foken,
+ use_auth_token=self.hf_token,
)
self.repo.git_pull()
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -45,10 +45,10 @@
@document("launch", "load", "from_pipeline", "integrate")
class Interface(Blocks):
"""
- The Interface class is Gradio's main high-level abstraction, and allows you to create a
- web-based GUI / demo around a machine learning model (or any Python function). You must specify
- three parameters: (1) the function to create a GUI for (2) the desired input components and
- (3) the desired output components. Further parameters can be specified to control the appearance
+ Interface is Gradio's main high-level class, and allows you to create a web-based GUI / demo
+ around a machine learning model (or any Python function) in a few lines of code.
+ You must specify three parameters: (1) the function to create a GUI for (2) the desired input components and
+ (3) the desired output components. Additional parameters can be used to control the appearance
and behavior of the demo.
Example:
| Add missing classes and functions to docs page
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I can't find all the functionality gradio has to offer documented in the Docs page.
For example, I can't find `gr.update` (https://github.com/gradio-app/gradio/issues/1647), the different flagging callbacks, and some components like `Interpretation` on the docs page.
Some of these concepts, like the flagging callbacks, are covered in the guides, but I think they should also be on the docs page. This will make it easier for developers and users to find all the public APIs in one place, as opposed to having to click through different guides and skim through the text to find the part they need.
**Describe the solution you'd like**
Document all user-facing functionality on the Docs page. We may need to reorganize the Docs page too, like adding more subheadings beyond `Interface` and `Components`.
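As a concrete illustration of one of the items above, a custom flagging callback subclasses `FlaggingCallback` and implements `setup` and `flag`. The sketch below is only illustrative: the class name `MyCSVLogger` is made up, and the exact `flag()` signature should be checked against the base class rather than taken from here.
```python
import csv
import os
from typing import Any, List, Optional

from gradio.flagging import FlaggingCallback


class MyCSVLogger(FlaggingCallback):
    """Toy callback: appends each flagged sample to a local CSV file."""

    def setup(self, components: List[Any], flagging_dir: str):
        # Called once when the demo is launched
        self.components = components
        self.flagging_dir = flagging_dir
        os.makedirs(flagging_dir, exist_ok=True)

    def flag(
        self,
        flag_data: List[Any],
        flag_option: Optional[str] = None,
        flag_index: Optional[int] = None,
        username: Optional[str] = None,
    ) -> int:
        # Called each time a user presses the Flag button
        log_file = os.path.join(self.flagging_dir, "log.csv")
        with open(log_file, "a", newline="") as csvfile:
            csv.writer(csvfile).writerow([str(d) for d in flag_data] + [flag_option])
        with open(log_file) as csvfile:
            return sum(1 for _ in csvfile)  # number of flagged samples so far
```
It could then be passed to `gr.Interface(..., flagging_callback=MyCSVLogger())`, mirroring the docstring examples in the patch above.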
| Adding the brainstorming tag because this can definitely be fleshed out more. | 2022-07-19T21:43:07 |
|
gradio-app/gradio | 1,842 | gradio-app__gradio-1842 | [
"1658"
] | 493cf7c069b9daeab37f6cd617e84cbf1986e7eb | diff --git a/demo/gender_sentence_custom_interpretation/run.py b/demo/gender_sentence_custom_interpretation/run.py
--- a/demo/gender_sentence_custom_interpretation/run.py
+++ b/demo/gender_sentence_custom_interpretation/run.py
@@ -14,6 +14,8 @@ def gender_of_sentence(sentence):
return {"male": male_count / total, "female": female_count / total}
+# Number of arguments to interpretation function must
+# match number of inputs to prediction function
def interpret_gender(sentence):
result = gender_of_sentence(sentence)
is_male = result["male"] > result["female"]
@@ -28,7 +30,9 @@ def interpret_gender(sentence):
):
score = -1
interpretation.append((word, score))
- return interpretation
+ # Output must be a list of lists containing the same number of elements as inputs
+ # Each element corresponds to the interpretation scores for the given input
+ return [interpretation]
demo = gr.Interface(
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -160,7 +160,7 @@ def __init__(
cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
examples_per_page: If examples are provided, how many to display per page.
live: whether the interface should automatically rerun if any of the inputs change.
- interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function.
+ interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function. For more information on the different interpretation methods, see the Advanced Interface Features guide.
num_shap: a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. Only applies if interpretation is "shap".
title: a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window.
description: a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content.
| It is unclear what the API of the custom interpretation function should be
### Describe the bug
The `Interface` docs say that the `interpretation` parameter can be "your own custom interpretation function." However, it's not clear to me what the API of that function should be.
There's an example in the `advanced_interface_features` guide but it doesn't work when I run it locally. After hitting "Interpret" it just hangs there:
```python
import re
import gradio as gr
male_words, female_words = ["he", "his", "him"], ["she", "hers", "her"]
def gender_of_sentence(sentence):
male_count = len([word for word in sentence.split() if word.lower() in male_words])
female_count = len(
[word for word in sentence.split() if word.lower() in female_words]
)
total = max(male_count + female_count, 1)
return {"male": male_count / total, "female": female_count / total}
def interpret_gender(sentence):
result = gender_of_sentence(sentence)
is_male = result["male"] > result["female"]
interpretation = []
for word in re.split("( )", sentence):
score = 0
token = word.lower()
if (is_male and token in male_words) or (not is_male and token in female_words):
score = 1
elif (is_male and token in female_words) or (
not is_male and token in male_words
):
score = -1
interpretation.append((word, score))
return interpretation
demo = gr.Interface(
fn=gender_of_sentence,
inputs=gr.Textbox(value="She went to his house to get her keys."),
outputs="label",
interpretation=interpret_gender,
)
demo.launch()
```

### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Run the demo above.
### Screenshot
_No response_
### Logs
Looking at the API response in this case, it looks like we're returning `["She",1]`, but judging from the response of the "default" interpretation, we should be returning a list of lists.
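In other words, mirroring the demo patch above, the custom interpretation function has to return one list of (token, score) pairs per input component, wrapped in an outer list. A minimal sketch of the corrected return for this single-input demo (placeholder scores only):
```python
def interpret_gender(sentence):
    # the real scoring logic from the demo above would go here
    interpretation = [(word, 0) for word in sentence.split()]  # placeholder scores
    # One inner list per input component; with a single Textbox input the
    # (token, score) pairs are wrapped in an outer list of length 1:
    return [interpretation]
```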
### System Info
```shell
-
```
### Severity
serious, but I can work around it
| This is both a docs issue and a bug: the example doesn't work and the docs are not very clear, so I'm adding it to the docs milestone. | 2022-07-20T16:59:33 |
|
gradio-app/gradio | 1,851 | gradio-app__gradio-1851 | [
"1659"
] | ccd8e18a174c1ac261a0f52f0175ad1165eeb03a | diff --git a/demo/blocks_interpretation/run.py b/demo/blocks_interpretation/run.py
new file mode 100644
--- /dev/null
+++ b/demo/blocks_interpretation/run.py
@@ -0,0 +1,57 @@
+import gradio as gr
+import shap
+from transformers import pipeline
+import matplotlib
+import matplotlib.pyplot as plt
+matplotlib.use('Agg')
+
+
+sentiment_classifier = pipeline("text-classification", return_all_scores=True)
+
+
+def classifier(text):
+ pred = sentiment_classifier(text)
+ return {p["label"]: p["score"] for p in pred[0]}
+
+
+def interpretation_function(text):
+ explainer = shap.Explainer(sentiment_classifier)
+ shap_values = explainer([text])
+ # Dimensions are (batch size, text size, number of classes)
+ # Since we care about positive sentiment, use index 1
+ scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
+
+ scores_desc = sorted(scores, key=lambda t: t[1])[::-1]
+
+ # Filter out empty string added by shap
+ scores_desc = [t for t in scores_desc if t[0] != ""]
+
+ fig_m = plt.figure()
+ plt.bar(x=[s[0] for s in scores_desc[:5]],
+ height=[s[1] for s in scores_desc[:5]])
+ plt.title("Top words contributing to positive sentiment")
+ plt.ylabel("Shap Value")
+ plt.xlabel("Word")
+ return {"original": text, "interpretation": scores}, fig_m
+
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ input_text = gr.Textbox(label="Input Text")
+ with gr.Row():
+ classify = gr.Button("Classify Sentiment")
+ interpret = gr.Button("Interpret")
+ with gr.Column():
+ label = gr.Label(label="Predicted Sentiment")
+ with gr.Column():
+ with gr.Tabs():
+ with gr.TabItem("Display interpretation with built-in component"):
+ interpretation = gr.components.Interpretation(input_text)
+ with gr.TabItem("Display interpretation with plot"):
+ interpretation_plot = gr.Plot()
+
+ classify.click(classifier, input_text, label)
+ interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])
+
+demo.launch()
\ No newline at end of file
| Add a guide on using interpretations from Blocks
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
The docs only explain interpretations in the context of `Interface`s, but it is possible to add interpretations to a demo built with the Blocks API.
**Describe the solution you'd like**
Write a guide on how to add interpretations to a demo built with the Blocks API.
**Additional context**
Add any other context or screenshots about the feature request here.
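For reference, the core wiring in the demo added above boils down to the trimmed sketch below. The two functions are stand-ins for the shap-based ones in that demo; the interpretation function just has to return a dict with "original" and "interpretation" keys for the `Interpretation` component.
```python
import gradio as gr

def classifier(text):
    return {"positive": 0.7, "negative": 0.3}  # stand-in for the pipeline above

def interpretation_function(text):
    scores = [(tok, 0.1) for tok in text.split()]  # stand-in scores
    return {"original": text, "interpretation": scores}

with gr.Blocks() as demo:
    text = gr.Textbox()
    label = gr.Label()
    interpretation = gr.components.Interpretation(text)  # renders the dict above
    gr.Button("Classify").click(classifier, text, label)
    gr.Button("Interpret").click(interpretation_function, text, interpretation)

demo.launch()
```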
| 2022-07-21T19:34:25 |
||
gradio-app/gradio | 1,866 | gradio-app__gradio-1866 | [
"1863"
] | babbb7eb4105568cff92d9ce17b3846fd07bb66f | diff --git a/gradio/__init__.py b/gradio/__init__.py
--- a/gradio/__init__.py
+++ b/gradio/__init__.py
@@ -26,6 +26,7 @@
Highlightedtext,
HighlightedText,
Image,
+ Interpretation,
Json,
Label,
Markdown,
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -4015,10 +4015,13 @@ def style(self):
############################
+@document()
class Dataset(Clickable, Component):
"""
- Used to create a output widget for showing datasets. Used to render the examples
- box in the interface.
+ Used to create an output widget for showing datasets. Used to render the examples
+ box.
+ Preprocessing: this component does *not* accept input.
+ Postprocessing: expects a {list} of {lists} corresponding to the dataset data.
"""
def __init__(
@@ -4088,9 +4091,12 @@ def style(
)
+@document()
class Interpretation(Component):
"""
Used to create an interpretation widget for a component.
+ Preprocessing: this component does *not* accept input.
+ Postprocessing: expects a {dict} with keys "original" and "interpretation".
"""
def __init__(
@@ -4101,6 +4107,12 @@ def __init__(
elem_id: Optional[str] = None,
**kwargs,
):
+ """
+ Parameters:
+ component: Which component to show in the interpretation widget.
+ visible: Whether or not the interpretation is visible.
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ """
Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs)
self.component = component
diff --git a/website/homepage/src/docs/__init__.py b/website/homepage/src/docs/__init__.py
--- a/website/homepage/src/docs/__init__.py
+++ b/website/homepage/src/docs/__init__.py
@@ -56,7 +56,8 @@ def add_supported_events():
component["events"].append("edit()")
if issubclass(component["class"], Submittable):
component["events"].append("submit()")
- component["events"] = ", ".join(component["events"])
+ if component["events"]:
+ component["events"] = ", ".join(component["events"])
add_supported_events()
@@ -79,6 +80,7 @@ def override_signature(name, signature):
override_signature("Tabs", "with gradio.Tabs():")
override_signature("Group", "with gradio.Group():")
override_signature("Box", "with gradio.Box():")
+override_signature("Dataset", "gr.Dataset(components, samples)")
def find_cls(target_cls):
| Add documentation for the Dataset component
- [x] I have searched to see if a similar issue already exists.
If I go to https://gradio.app/docs/#examples-header, it mentions that `gr.Examples` is a wrapper around the `Dataset` component, but I don't find any info on this component. Docs and info on `gr.Dataset` would be super useful!
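For context, a bare-bones usage sketch, assuming the `gr.Dataset(components, samples)` signature shown in the patch above (the echo function is only an example):
```python
import gradio as gr

with gr.Blocks() as demo:
    textbox = gr.Textbox()
    # Renders the samples as a clickable examples box for the given component
    samples = gr.Dataset(components=[textbox], samples=[["hello"], ["bonjour"], ["hola"]])
    # Clicking a row passes the selected sample's values to the function
    samples.click(lambda sample: sample[0], inputs=samples, outputs=textbox)

demo.launch()
```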
| CC @freddyaboulton | 2022-07-22T20:39:23 |
|
gradio-app/gradio | 1,882 | gradio-app__gradio-1882 | [
"1359"
] | ba65a95e070f4be1a6727e4bd262633770360a7c | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -205,8 +205,10 @@ def update(**kwargs) -> dict:
Updates component parameters.
This is a shorthand for using the update method on a component.
For example, rather than using gr.Number.update(...) you can just use gr.update(...).
+ Note that your editor's autocompletion will suggest proper parameters
+ if you use the update method on the component.
- Demos: blocks_update, blocks_essay_update
+ Demos: blocks_essay, blocks_update, blocks_essay_update
Parameters:
kwargs: Key-word arguments used to update the component's properties.
| Add component update to docs
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
In workflows with multiple steps, it might be necessary to let the user make a choice based on the output of model 1 before running subsequent models.
Currently, the radio and dropdown fields only support changing the active value but not all the values.
**Describe the solution you'd like**
As with updating the textbox, I'd like to be able to do the following:
```
import gradio as gr
def update(name):
return [f'Hello {name}',f'Bye {name}', f'Good morning {name}' ]
demo = gr.Blocks()
with demo:
inp = gr.Textbox(placeholder="What is your name?")
out = gr.Radio(label="Greeting to use")
inp.change(fn=update,
inputs=inp,
outputs=out)
demo.launch()
```
| This is actually possible with `Blocks`! We still need to add documentation around it, but in your case, all you need to do is modify the return statement to use the special `.update()` method that every component defines, like this:
```py
def update(name):
return gr.Radio.update(choices=[f'Hello {name}',f'Bye {name}', f'Good morning {name}' ])
demo = gr.Blocks()
with demo:
inp = gr.Textbox(placeholder="What is your name?")
out = gr.Radio(choices=[], label="Greeting to use")
inp.change(fn=update,
inputs=inp,
outputs=out)
demo.launch()
```
<img width="626" alt="image" src="https://user-images.githubusercontent.com/1778297/169883905-669e825c-ee0b-4dd2-bf11-009ec09fc30d.png">
Ah thanks. I tried this using `gr.radio.update(["test1", "test2"])`. Adding documentation for this would be good.
Opening the issue until update is added to docs.
@abidlabs @FarukOzderim Does this issue track adding the `update` method for each component to the Docs page?
We already added `gr.update` to the Docs page under "blocks utilities" and "Introduction to Blocks" already includes an "Updating Component Configurations" section.
I think we should close unless we specifically want to add the `update` method for each component to the docs page.
It should stay open because we need to document Component.update as we have it in our library as well, even if we have it in the guides. Btw, IMO Component.update should be the suggested usage because it has autocompletion for the function parameters.
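For illustration, the two forms being discussed are roughly interchangeable; `visible` and `choices` below are just example parameters:
```python
import gradio as gr

def toggle(show):
    # Generic shorthand: works for whatever output component it is wired to
    return gr.update(visible=show)

def toggle_radio(show):
    # Component-specific form: editors can autocomplete Radio's parameters
    return gr.Radio.update(visible=show, choices=["a", "b"])
```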
But no need to list each of them separately, we can just show an example of it in gr.update and suggest using it since it will have parameter tracking/autocompletion. | 2022-07-25T22:25:02 |
|
gradio-app/gradio | 1,892 | gradio-app__gradio-1892 | [
"1673"
] | 3a58dd192479aaa638bc71905ec26c7aff828e44 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -188,7 +188,7 @@ def serialize(self, x: Any, called_directly: bool) -> Any:
Convert from a human-readable version of the input (path of an image, URL of a video, etc.) into the interface to a serialized version (e.g. base64) to pass into an API. May do different things if the interface is called() vs. used via GUI.
Parameters:
x: Input to interface
- called_directly: if true, the interface was called(), otherwise, it is being used via the GUI
+ called_directly: if True, the interface was called(), otherwise, it is being used via the GUI
"""
return x
@@ -254,6 +254,13 @@ def style(
border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ border: If True, will add border. If a tuple, will add edges according to the values in the tuple, starting from top and proceeding clock-wise.
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
if rounded is not None:
self._style["rounded"] = rounded
if border is not None:
@@ -269,7 +276,7 @@ def add_interactive_to_config(config, interactive):
return config
-@document()
+@document("change", "submit", "style")
class Textbox(Changeable, Submittable, IOComponent):
"""
Creates a textarea for user to enter string input or display string output.
@@ -373,7 +380,7 @@ def serialize(self, x: Any, called_directly: bool) -> Any:
Convert from a human-readable version of the input (path of an image, URL of a video, etc.) into the interface to a serialized version (e.g. base64) to pass into an API. May do different things if the interface is called() vs. used via GUI.
Parameters:
x: Input to interface
- called_directly: if true, the interface was called(), otherwise, it is being used via the GUI
+ called_directly: if True, the interface was called(), otherwise, it is being used via the GUI
"""
return x
@@ -464,7 +471,7 @@ def deserialize(self, x):
return x
-@document()
+@document("change", "submit", "style")
class Number(Changeable, Submittable, IOComponent):
"""
Creates a numeric field for user to enter numbers as input or display numeric output.
@@ -648,7 +655,7 @@ def deserialize(self, y):
return y
-@document()
+@document("change", "style")
class Slider(Changeable, IOComponent):
"""
Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
@@ -802,13 +809,18 @@ def style(
self,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the slider.
+ Parameters:
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
return IOComponent.style(
self,
container=container,
)
-@document()
+@document("change", "style")
class Checkbox(Changeable, IOComponent):
"""
Creates a checkbox that can be set to `True` or `False`.
@@ -931,7 +943,7 @@ def deserialize(self, x):
return x
-@document()
+@document("change", "style")
class CheckboxGroup(Changeable, IOComponent):
"""
Creates a set of checkboxes of which a subset can be checked.
@@ -1091,6 +1103,13 @@ def style(
item_container: Optional[bool] = None,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the CheckboxGroup.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ item_container: If True, will place the items in a container.
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
if item_container is not None:
self._style["item_container"] = item_container
@@ -1101,7 +1120,7 @@ def style(
)
-@document()
+@document("change", "style")
class Radio(Changeable, IOComponent):
"""
Creates a set of radio buttons of which only one can be selected.
@@ -1243,6 +1262,12 @@ def style(
item_container: Optional[bool] = None,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the radio component.
+ Parameters:
+ item_container: If True, will place items in a container.
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
if item_container is not None:
self._style["item_container"] = item_container
@@ -1252,7 +1277,7 @@ def style(
)
-@document()
+@document("change", "style")
class Dropdown(Radio):
"""
Creates a dropdown of which only one entry can be selected.
@@ -1305,12 +1330,19 @@ def style(
border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the Dropdown.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ border: If True, will add border. If a tuple, will add edges according to the values in the tuple, starting from top and proceeding clock-wise.
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
return IOComponent.style(
self, rounded=rounded, border=border, container=container
)
-@document()
+@document("edit", "clear", "change", "stream", "change")
class Image(Editable, Clearable, Changeable, Streamable, IOComponent):
"""
Creates an image component that can be used to upload/draw images (as an input) or display images (as an output).
@@ -1650,6 +1682,13 @@ def style(
height: Optional[int] = None,
width: Optional[int] = None,
):
+ """
+ This method can be used to change the appearance of the Image component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ height: Height of the image.
+ width: Width of the image.
+ """
self._style["height"] = height
self._style["width"] = width
return IOComponent.style(
@@ -1665,19 +1704,20 @@ def stream(
_js: Optional[str] = None,
):
"""
+ This event is triggered when the user streams the component (e.g. a live webcam
+ component)
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
"""
+ # js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
if self.source != "webcam":
raise ValueError("Image streaming only available if source is 'webcam'.")
Streamable.stream(self, fn, inputs, outputs, _js)
-@document()
+@document("change", "clear", "play", "pause", "stop", "style")
class Video(Changeable, Clearable, Playable, IOComponent):
"""
Creates an video component that can be used to upload/record videos (as an input) or display videos (as an output).
@@ -1848,6 +1888,13 @@ def style(
height: Optional[int] = None,
width: Optional[int] = None,
):
+ """
+ This method can be used to change the appearance of the video component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ height: Height of the video.
+ width: Width of the video.
+ """
self._style["height"] = height
self._style["width"] = width
return IOComponent.style(
@@ -1856,7 +1903,7 @@ def style(
)
-@document()
+@document("change", "clear", "play", "pause", "stop", "stream", "style")
class Audio(Changeable, Clearable, Playable, Streamable, IOComponent):
"""
Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).
@@ -1889,7 +1936,7 @@ def __init__(
show_label: if True, will display label.
interactive: if True, will allow users to upload and edit a audio file; if False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden.
- streaming: If set to true when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'microphone'.
+ streaming: If set to True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'microphone'.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
self.value = self.postprocess(value)
@@ -2152,13 +2199,14 @@ def stream(
_js: Optional[str] = None,
):
"""
+ This event is triggered when the user streams the component (e.g. a live webcam
+ component)
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
if self.source != "microphone":
raise ValueError(
"Audio streaming only available if source is 'microphone'."
@@ -2169,13 +2217,18 @@ def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the audio component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
)
-@document()
+@document("change", "clear", "style")
class File(Changeable, Clearable, IOComponent):
"""
Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output).
@@ -2367,13 +2420,18 @@ def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the file component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
)
-@document()
+@document("change", "style")
class Dataframe(Changeable, IOComponent):
"""
Accepts or displays 2D input through a spreadsheet-like component for dataframes.
@@ -2628,13 +2686,18 @@ def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the DataFrame component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
)
-@document()
+@document("change", "style")
class Timeseries(Changeable, IOComponent):
"""
Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically.
@@ -2770,6 +2833,11 @@ def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the TimeSeries component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
@@ -2806,7 +2874,7 @@ def style(self):
return self
-@document()
+@document("click", "style")
class Button(Clickable, IOComponent):
"""
Used to create a button, that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function.
@@ -2863,6 +2931,13 @@ def style(
border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
margin: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the button component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ full_width: If True, the button will span the full width of the container.
+ border: If True, will include a border. If a tuple, will add borders according to values in the tuple, where the elements correspond to top, right, bottom, left edge.
+ """
if full_width is not None:
self._style["full_width"] = full_width
if margin is not None:
@@ -2875,7 +2950,7 @@ def style(
)
-@document()
+@document("change", "submit", "style")
class ColorPicker(Changeable, Submittable, IOComponent):
"""
Creates a color picker for user to select a color as string input.
@@ -2991,7 +3066,7 @@ def deserialize(self, x):
############################
-@document()
+@document("change", "style")
class Label(Changeable, IOComponent):
"""
Displays a classification label, along with confidence scores of top categories, if provided.
@@ -3126,10 +3201,15 @@ def style(
self,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the label component.
+ Parameters:
+ container: If True, will add a container to the label - providing some extra padding around the border.
+ """
return IOComponent.style(self, container=container)
-@document()
+@document("change", "style")
class HighlightedText(Changeable, IOComponent):
"""
Displays text that contains spans that are highlighted by category or numerical value.
@@ -3269,10 +3349,11 @@ def style(
container: Optional[bool] = None,
):
"""
+ This method can be used to change the appearance of the HighlightedText component.
Parameters:
- rounded: If True, will round the corners of the text. If a tuple, will round the corners of the text according to the values in the tuple, starting from top left and proceeding clock-wise.
+ rounded: If True, will round the corners of the text. If a tuple, will round the corners according to the values in the tuple, starting from top left and proceeding clock-wise.
color_map: Map between category and respective colors.
- container: If True, will place the component in a container.
+ container: If True, will place the component in a container - providing some extra padding around the border.
"""
if color_map is not None:
self._style["color_map"] = color_map
@@ -3280,7 +3361,7 @@ def style(
return IOComponent.style(self, rounded=rounded, container=container)
-@document()
+@document("change", "style")
class JSON(Changeable, IOComponent):
"""
Used to display arbitrary JSON output prettily.
@@ -3360,10 +3441,15 @@ def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
def style(self, container: Optional[bool] = None):
+ """
+ This method can be used to change the appearance of the JSON component.
+ Parameters:
+ container: If True, will place the JSON in a container - providing some extra padding around the border.
+ """
return IOComponent.style(self, container=container)
-@document()
+@document("change")
class HTML(Changeable, IOComponent):
"""
Used to display arbitrary HTML output.
@@ -3427,7 +3513,7 @@ def style(self):
return self
-@document()
+@document("style")
class Gallery(IOComponent):
"""
Used to display a list of images as a gallery that can be scrolled through.
@@ -3518,6 +3604,13 @@ def style(
height: Optional[str] = None,
container: Optional[bool] = None,
):
+ """
+ This method can be used to change the appearance of the gallery component.
+ Parameters:
+ rounded: If True, will round the corners. If a tuple, will round corners according to the values in the tuple, starting from top left and proceeding clock-wise.
+ height: Height of the gallery.
+ container: If True, will place gallery in a container - providing some extra padding around the border.
+ """
if grid is not None:
self._style["grid"] = grid
if height is not None:
@@ -3632,7 +3725,7 @@ def restore_flagged(self, dir, data, encryption_key):
]
-@document()
+@document("change", "style")
class Chatbot(Changeable, IOComponent):
"""
Displays a chatbot output showing both user submitted messages and responses
@@ -3715,8 +3808,16 @@ def postprocess(self, y: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
- color_map: Optional[Dict[str, str]] = None,
+ color_map: Optional[List[str, str]] = None,
):
+ """
+ This method can be used to change the appearance of the Chatbot component.
+ Parameters:
+ rounded: If True, whether the chat bubbles should be rounded. If a tuple, will round the corners of the bubble according to the values in the tuple, starting from top left and proceeding clock-wise.
+ color_map: List containing colors to apply to chat bubbles.
+ Returns:
+
+ """
if color_map is not None:
self._style["color_map"] = color_map
@@ -3726,7 +3827,7 @@ def style(
)
-@document()
+@document("change", "edit", "clear", "style")
class Model3D(Changeable, Editable, Clearable, IOComponent):
"""
Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf).
@@ -3858,13 +3959,18 @@ def style(
self,
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the Model3D component.
+ Args:
+ rounded: If True, will round the corners of the Model3D component. If a tuple, will round the corners of the Model3D according to the values in the tuple, starting from top left and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
)
-@document()
+@document("change", "clear")
class Plot(Changeable, Clearable, IOComponent):
"""
Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported)
@@ -3954,7 +4060,7 @@ def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
-@document()
+@document("change")
class Markdown(IOComponent, Changeable):
"""
Used to render arbitrary Markdown output.
@@ -4021,7 +4127,7 @@ def style(self):
############################
-@document()
+@document("click", "style")
class Dataset(Clickable, Component):
"""
Used to create an output widget for showing datasets. Used to render the examples
@@ -4090,6 +4196,12 @@ def style(
rounded: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
border: Optional[bool | Tuple[bool, bool, bool, bool]] = None,
):
+ """
+ This method can be used to change the appearance of the Dataset component.
+ Parameters:
+ rounded: If True, will round the all corners of the dataset. If a tuple, will round the corners of the dataset according to the values in the tuple, starting from top left and proceeding clock-wise.
+ border: If True, will include a border for all edges of the dataset. If a tuple, will add edges according to the values in the tuple, starting from top and proceeding clock-wise.
+ """
return IOComponent.style(
self,
rounded=rounded,
diff --git a/gradio/documentation.py b/gradio/documentation.py
--- a/gradio/documentation.py
+++ b/gradio/documentation.py
@@ -115,7 +115,7 @@ def document_fn(fn: Callable) -> Tuple[str, List[Dict], Dict, Optional[str]]:
def document_cls(cls):
doc_str = inspect.getdoc(cls)
if doc_str is None:
- return "", {}
+ return "", {}, ""
tags = {}
description_lines = []
mode = "description"
diff --git a/gradio/events.py b/gradio/events.py
--- a/gradio/events.py
+++ b/gradio/events.py
@@ -25,7 +25,7 @@ def change(
):
"""
This event is triggered when the component's input value changes (e.g. when the user types in a textbox
- or uploads an image)
+ or uploads an image). This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
@@ -35,9 +35,11 @@ def change(
status_tracker: StatusTracker to visualize function progress
scroll_to_output: If True, will scroll to output component on completion
show_progress: If True, will show progress animation while pending
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of input and outputs components, return should be a list of values for output component.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"change",
fn,
@@ -71,6 +73,7 @@ def click(
):
"""
This event is triggered when the component (e.g. a button) is clicked.
+ This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
@@ -80,11 +83,11 @@ def click(
status_tracker: StatusTracker to visualize function progress
scroll_to_output: If True, will scroll to output component on completion
show_progress: If True, will show progress animation while pending
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- _preprocess: If False, will not run preprocessing of component data before running 'fn'.
- _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"click",
fn,
@@ -118,6 +121,8 @@ def submit(
):
"""
This event is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused.
+ This method can be used when this component is in a Gradio Blocks.
+
Parameters:
fn: Callable function
@@ -127,9 +132,11 @@ def submit(
status_tracker: StatusTracker to visualize function progress
scroll_to_output: If True, will scroll to output component on completion
show_progress: If True, will show progress animation while pending
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"submit",
fn,
@@ -160,16 +167,18 @@ def edit(
):
"""
This event is triggered when the user edits the component (e.g. image) using the
- built-in editor.
+ built-in editor. This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"edit",
fn,
@@ -197,16 +206,18 @@ def clear(
):
"""
This event is triggered when the user clears the component (e.g. image or audio)
- using the X button for the component.
+ using the X button for the component. This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"submit",
fn,
@@ -233,16 +244,19 @@ def play(
_postprocess: bool = True,
):
"""
- This event is triggered when the user plays the component (e.g. audio or video)
+ This event is triggered when the user plays the component (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"play",
fn,
@@ -267,16 +281,19 @@ def pause(
_postprocess: bool = True,
):
"""
- This event is triggered when the user pauses the component (e.g. audio or video)
+ This event is triggered when the user pauses the component (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"pause",
fn,
@@ -301,16 +318,19 @@ def stop(
_postprocess: bool = True,
):
"""
- This event is triggered when the user stops the component (e.g. audio or video)
+ This event is triggered when the user stops the component (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.set_event_trigger(
"stop",
fn,
@@ -339,16 +359,18 @@ def stream(
):
"""
This event is triggered when the user streams the component (e.g. a live webcam
- component)
+ component). This method can be used when this component is in a Gradio Blocks.
Parameters:
fn: Callable function
inputs: List of inputs
outputs: List of outputs
api_name: Defining this parameter exposes the endpoint in the api docs
- _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- Returns: None
+ queue: If True, will place the request on the queue, if the queue exists
"""
+ # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
+ # _preprocess: If False, will not run preprocessing of component data before running 'fn'.
+ # _postprocess: If False, will not run postprocessing of component data before returning 'fn' output.
self.streaming = True
self.set_event_trigger(
"stream",
| Add missing classes and functions to docs page
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, I can't find all the functionality gradio has to offer documented in the Docs page.
For example, I can't find `gr.update` (https://github.com/gradio-app/gradio/issues/1647), the different flagging callbacks, and some components like `Interpretation` on the docs page.
Some of these concepts, like the flagging callbacks, are covered in the guides, but I think they should also be on the docs page. This will make it easier for developers and users to find all the public APIs in one place, as opposed to having to click through different guides and skim through the text to find the part they need.
**Describe the solution you'd like**
Document all user-facing functionality on the Docs page. We may need to reorganize the Docs page too, like adding more subheadings beyond `Interface` and `Components`.
| Adding the brainstorming tag because this can definitely be fleshed out more.
TODO:
- [x] Interpretation
- [x] Flagging callbacks
- [ ] The `style` method of components
- [x] https://github.com/gradio-app/gradio/issues/1743
- [x] #1773
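For the unchecked `style` item, the pattern being documented is method chaining on a component. A rough sketch (available keyword arguments differ per component, as in the docstrings added above):
```python
import gradio as gr

with gr.Blocks() as demo:
    btn = gr.Button("Run").style(full_width=True, rounded=True)
    gallery = gr.Gallery().style(grid=4, height="auto")

demo.launch()
```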
Oh this shouldn't close just yet | 2022-07-26T19:48:06 |
|
gradio-app/gradio | 1,893 | gradio-app__gradio-1893 | [
"1887"
] | 3f5a9ea19d67996e848f24c7dd881ae20c34178f | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2462,7 +2462,9 @@ def __init__(
self.__validate_headers(headers, self.col_count[0])
- self.headers = headers
+ self.headers = (
+ headers if headers is not None else list(range(1, self.col_count[0] + 1))
+ )
self.datatype = (
datatype if isinstance(datatype, list) else [datatype] * self.col_count[0]
)
@@ -2482,8 +2484,11 @@ def __init__(
[values[c] for c in column_dtypes] for _ in range(self.row_count[0])
]
- self.value = value if value is not None else self.test_input
- self.value = self.__process_markdown(self.value, datatype)
+ self.value = (
+ self.postprocess(value)
+ if value is not None
+ else self.postprocess(self.test_input)
+ )
self.max_rows = max_rows
self.max_cols = max_cols
@@ -2596,7 +2601,19 @@ def postprocess(self, y: str | pd.DataFrame | np.ndarray | List[List[str | float
if isinstance(y, (np.ndarray, list)):
if isinstance(y, np.ndarray):
y = y.tolist()
+
+ _headers = self.headers
+
+ if len(self.headers) < len(y[0]):
+ _headers = [
+ *self.headers,
+ *list(range(len(self.headers) + 1, len(y[0]) + 1)),
+ ]
+ elif len(self.headers) > len(y[0]):
+ _headers = self.headers[0 : len(y[0])]
+
return {
+ "headers": _headers,
"data": Dataframe.__process_markdown(y, self.datatype),
}
raise ValueError("Cannot process value as a Dataframe")
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -10,6 +10,7 @@
import pandas as pd
import PIL
import pytest
+from requests import head
import gradio as gr
from gradio import media_data
@@ -1038,11 +1039,14 @@ def test_component_functions(self):
"datatype": ["str", "str", "str"],
"row_count": (3, "dynamic"),
"col_count": (3, "dynamic"),
- "value": [
- ["", "", ""],
- ["", "", ""],
- ["", "", ""],
- ],
+ "value": {
+ "data": [
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
+ ],
+ "headers": ["Name", "Age", "Member"],
+ },
"name": "dataframe",
"show_label": True,
"label": "Dataframe Input",
@@ -1063,26 +1067,11 @@ def test_component_functions(self):
wrong_type = gr.Dataframe(type="unknown")
wrong_type.preprocess(x_data)
- # Output functionalities
dataframe_output = gr.Dataframe()
- output = dataframe_output.postprocess(np.zeros((2, 2)))
- self.assertDictEqual(output, {"data": [[0, 0], [0, 0]]})
- output = dataframe_output.postprocess([[1, 3, 5]])
- self.assertDictEqual(output, {"data": [[1, 3, 5]]})
- output = dataframe_output.postprocess(
- pd.DataFrame([[2, True], [3, True], [4, False]], columns=["num", "prime"])
- )
- self.assertDictEqual(
- output,
- {
- "headers": ["num", "prime"],
- "data": [[2, True], [3, True], [4, False]],
- },
- )
self.assertEqual(
dataframe_output.get_config(),
{
- "headers": None,
+ "headers": [1, 2, 3],
"max_rows": 20,
"max_cols": None,
"overflow_row_behaviour": "paginate",
@@ -1095,15 +1084,38 @@ def test_component_functions(self):
"datatype": ["str", "str", "str"],
"row_count": (3, "dynamic"),
"col_count": (3, "dynamic"),
- "value": [
- ["", "", ""],
- ["", "", ""],
- ["", "", ""],
- ],
+ "value": {
+ "data": [
+ ["", "", ""],
+ ["", "", ""],
+ ["", "", ""],
+ ],
+ "headers": [1, 2, 3],
+ },
"interactive": None,
"wrap": False,
},
)
+
+ def test_postprocess(self):
+ """
+ postprocess
+ """
+ dataframe_output = gr.Dataframe()
+ output = dataframe_output.postprocess(np.zeros((2, 2)))
+ self.assertDictEqual(output, {"data": [[0, 0], [0, 0]], "headers": [1, 2]})
+ output = dataframe_output.postprocess([[1, 3, 5]])
+ self.assertDictEqual(output, {"data": [[1, 3, 5]], "headers": [1, 2, 3]})
+ output = dataframe_output.postprocess(
+ pd.DataFrame([[2, True], [3, True], [4, False]], columns=["num", "prime"])
+ )
+ self.assertDictEqual(
+ output,
+ {
+ "headers": ["num", "prime"],
+ "data": [[2, True], [3, True], [4, False]],
+ },
+ )
with self.assertRaises(ValueError):
wrong_type = gr.Dataframe(type="unknown")
wrong_type.postprocess(0)
@@ -1128,6 +1140,26 @@ def test_component_functions(self):
},
)
+ # When the headers don't match the data
+ dataframe_output = gr.Dataframe(headers=["one", "two", "three"])
+ output = dataframe_output.postprocess([[2, True], [3, True]])
+ self.assertDictEqual(
+ output,
+ {
+ "headers": ["one", "two"],
+ "data": [[2, True], [3, True]],
+ },
+ )
+ dataframe_output = gr.Dataframe(headers=["one", "two", "three"])
+ output = dataframe_output.postprocess([[2, True, "ab", 4], [3, True, "cd", 5]])
+ self.assertDictEqual(
+ output,
+ {
+ "headers": ["one", "two", "three", 4],
+ "data": [[2, True, "ab", 4], [3, True, "cd", 5]],
+ },
+ )
+
def test_in_interface_as_input(self):
"""
Interface, process,
@@ -1135,7 +1167,7 @@ def test_in_interface_as_input(self):
x_data = {"data": [[1, 2, 3], [4, 5, 6]]}
iface = gr.Interface(np.max, "numpy", "number")
self.assertEqual(iface.process([x_data]), [6])
- x_data = {"data": [["Tim"], ["Jon"], ["Sal"]]}
+ x_data = {"data": [["Tim"], ["Jon"], ["Sal"]], "headers": [1, 2, 3]}
def get_last(my_list):
return my_list[-1][-1]
@@ -1153,7 +1185,8 @@ def check_odd(array):
iface = gr.Interface(check_odd, "numpy", "numpy")
self.assertEqual(
- iface.process([{"data": [[2, 3, 4]]}])[0], {"data": [[True, False, True]]}
+ iface.process([{"data": [[2, 3, 4]]}])[0],
+ {"data": [[True, False, True]], "headers": [1, 2, 3]},
)
| Dataframe headers overwritten with lists/arrays
### Describe the bug
When `Dataframe` outputs data which is a Python list/numpy array, the header names get overwritten with numeric indices.
Maybe this is not a bug but caused by me not using it properly. However, I did not find relevant information in the documentation.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```
import gradio as gr
interface = gr.Interface(
fn=lambda: [
["x1", "y1"],
["x2", "y2"],
["x3", "y3"],
],
inputs=None,
outputs=gr.DataFrame(
col_count=2,
row_count=3,
type="array",
headers=["A", "B"],
interactive=False,
)
)
interface.launch(debug=True)
```
### Screenshot
Before:

After:

### Logs
```shell
No logs available.
```
### System Info
```shell
gradio 3.1.1, Fedora 36, Firefox 102.0
```
### Severity
serious, but I can work around it
| 2022-07-26T19:58:12 |
|
gradio-app/gradio | 1,936 | gradio-app__gradio-1936 | [
"1303"
] | d7db15fb07fe1ac15a26b5eab79b223f9d634807 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2419,7 +2419,7 @@ def __init__(
value: Optional[List[List[Any]]] = None,
*,
headers: Optional[List[str]] = None,
- row_count: int | Tuple[int, str] = (3, "dynamic"),
+ row_count: int | Tuple[int, str] = (1, "dynamic"),
col_count: Optional[int | Tuple[int, str]] = None,
datatype: str | List[str] = "str",
type: str = "pandas",
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1037,13 +1037,11 @@ def test_component_functions(self):
{
"headers": ["Name", "Age", "Member"],
"datatype": ["str", "str", "str"],
- "row_count": (3, "dynamic"),
+ "row_count": (1, "dynamic"),
"col_count": (3, "dynamic"),
"value": {
"data": [
["", "", ""],
- ["", "", ""],
- ["", "", ""],
],
"headers": ["Name", "Age", "Member"],
},
@@ -1082,13 +1080,11 @@ def test_component_functions(self):
"elem_id": None,
"visible": True,
"datatype": ["str", "str", "str"],
- "row_count": (3, "dynamic"),
+ "row_count": (1, "dynamic"),
"col_count": (3, "dynamic"),
"value": {
"data": [
["", "", ""],
- ["", "", ""],
- ["", "", ""],
],
"headers": [1, 2, 3],
},
| 3.0.1 Dataframe empty row
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
@pngwn
I am using gradio 3.0.1 and found that there is a problem with blank rows in Dataframe output: when the data is less than 3 rows, there are blank rows, as shown in the figure.
### Reproduction



### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio 3.0.1
ubuntu 20.04
chrome
```
### Severity
annoyance
| Can you show me the code you are using to generate this Dataframe please.
@pngwn
My project: https://huggingface.co/spaces/Zengyf-CVer/Gradio_YOLOv5_Det_v3
You can enter a picture with a picture of one object or two objects.
Ah thanks.
What is happening here is the dataframe is always respecting the `row_count` as a minimum. If you set row_count to `1` on the Dataframe it works as expected. Not sure what the best behaviour is here.
cc @abidlabs @aliabid94
Just to clarify, the default row_count is `3` which is why those 3 rows are visible.
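In code, the workaround mentioned above is just:
```python
out = gr.Dataframe(row_count=1)  # minimum of one row; returned data can still add more rows
```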
It seems like reasonable behavior to me for the Dataframe to initialize with the stated number of rows. What we need to do is implement "delete row / delete column" functionality to address these kinds of situations.
This specific case is about whether the rows should match what comes back from the predict function or whether the component should respect the row count in that case as well. As an output, the dataframe wouldn't have add/delete functionality.
Inputs and outputs aren't significantly different, so what makes sense for an input doesn't _always_ make sense for an output (although I'm not sure that is the case here), but we can modify the behaviour of static vs dynamic components.
When the output Dataframe defaults to 3 rows, it gives users the illusion that empty rows were part of the output.
> This specific case is about whether the rows should match what comes back from the predict function or if it should respect the row count in that case as well. As an output the dataframe wouldn't have add/ delete functionality.
Oh sorry, I didn't read carefully. Going off of your comment @pngwn, what if we make the default # of rows be 0 or 1? We could do this for static dataframes only, but I don't see it being a problem for dynamic dataframes either (especially right now, given that you can only add rows, not delete them).
(sorry accidentally closed) | 2022-08-02T17:44:33 |
gradio-app/gradio | 1,938 | gradio-app__gradio-1938 | [
"1533"
] | ab18e07ff0c42c4a5313eccf320b1bb274d89b28 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -13,6 +13,7 @@
import pathlib
import shutil
import tempfile
+import uuid
import warnings
from copy import deepcopy
from types import ModuleType
@@ -122,7 +123,7 @@ def save_file(self, file: tempfile._TemporaryFileWrapper, dir: str, label: str):
"""
Saved flagged file and returns filepath
"""
- label = "".join([char for char in label if char.isalnum() or char in "._- "])
+ label = processing_utils.strip_invalid_filename_characters(label)
old_file_name = file.name
output_dir = os.path.join(dir, label)
if os.path.exists(output_dir):
@@ -3618,6 +3619,39 @@ def style(
return IOComponent.style(self, rounded=rounded, container=container)
+ def save_flagged(
+ self, dir: str, label: Optional[str], data: List[str], encryption_key: bool
+ ) -> None | str:
+ if data is None:
+ return None
+
+ label = processing_utils.strip_invalid_filename_characters(label)
+ # join the label with the dir so that one directory stores all gallery
+ # outputs, e.g. <dir>/<component-label>
+ dir = os.path.join(dir, label)
+
+ # Save all the files belonging to this gallery in the gallery_path directory
+ gallery_path = str(uuid.uuid4())
+
+ for img_data in data:
+ self.save_flagged_file(dir, gallery_path, img_data, encryption_key)
+
+ # In the csv file, the row corresponding to this sample will list
+ # the path where all sub-images are stored, e.g. <component-label>/<uuid>
+ return os.path.join(label, gallery_path)
+
+ def restore_flagged(self, dir, data, encryption_key):
+ files = []
+ gallery_path = os.path.join(dir, data)
+ # Sort to preserve order
+ for file in sorted(os.listdir(gallery_path)):
+ file_path = os.path.join(gallery_path, file)
+ img = processing_utils.encode_file_to_base64(
+ file_path, encryption_key=encryption_key
+ )
+ files.append(img)
+ return files
+
class Carousel(IOComponent, Changeable):
"""
diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -514,3 +514,7 @@ def _scale(a, n, m, copy=True):
image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
image += imin_out
return image.astype(dtype_out)
+
+
+def strip_invalid_filename_characters(filename: str) -> str:
+ return "".join([char for char in filename if char.isalnum() or char in "._- "])
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1,16 +1,17 @@
import json
import os
+import pathlib
import tempfile
import unittest
from copy import deepcopy
from difflib import SequenceMatcher
+from unittest.mock import patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import pytest
-from requests import head
import gradio as gr
from gradio import media_data
@@ -1807,5 +1808,26 @@ def test_static(self):
self.assertEqual(component.get_config().get("value"), "#000000")
+@patch("uuid.uuid4", return_value="my-uuid")
+def test_gallery_save_and_restore_flagged(my_uuid, tmp_path):
+ gallery = gr.Gallery()
+ test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ data = [
+ gr.processing_utils.encode_file_to_base64(
+ pathlib.Path(test_file_dir, "bus.png")
+ ),
+ gr.processing_utils.encode_file_to_base64(
+ pathlib.Path(test_file_dir, "cheetah1.jpg")
+ ),
+ ]
+ label = "Gallery, 1"
+ path = gallery.save_flagged(str(tmp_path), label, data, encryption_key=None)
+ assert path == os.path.join("Gallery 1", "my-uuid")
+ assert sorted(os.listdir(os.path.join(tmp_path, path))) == ["0.png", "1.jpg"]
+
+ data_restored = gallery.restore_flagged(tmp_path, path, encryption_key=None)
+ assert data == data_restored
+
+
if __name__ == "__main__":
unittest.main()
| Image example breaks/freezes tab
### Describe the bug
When I select an example image in the Space, it does not fully load, and then my Spaces page freezes. The Space works well when I upload my own image.
https://huggingface.co/spaces/keras-io/EDSR
Even if the error is on the user's side (I don't know), the Space should likely not freeze like this.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Go to https://huggingface.co/spaces/keras-io/EDSR and click example image
### Screenshot
<img width="1201" alt="Screen Shot 2022-06-10 at 4 35 05 PM" src="https://user-images.githubusercontent.com/7246357/173088896-e24cefb9-b853-418c-902a-4691b4fe62d7.png">
### Logs
```shell
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/gradio/routes.py", line 255, in run_predict
output = await app.blocks.process_api(
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 538, in process_api
predictions, duration = await self.call_function(fn_index, processed_input)
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 452, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.local/lib/python3.8/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 495, in <lambda>
lambda *args: self.run_prediction(args)[0]
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 678, in run_prediction
prediction = predict_fn(*processed_input)
File "app.py", line 17, in infer
inputs = load_images([image])
File "/home/user/app/utils.py", line 21, in load_images
x = np.clip(file.reshape(480, 640, 3) / 255, 0, 1)
AttributeError: 'NoneType' object has no attribute 'reshape'
```
```
### System Info
```shell
Spaces, Gradio 3.0.13
```
### Severity
serious, but I can work around it
| Thanks @osanseviero, confirming that I see this issue. We'll look into this to see what's going on
I no longer get that same error, as the code in the Space (as well as the gradio version) has been updated since this was posted.
I'm pretty sure this is the same bug as #1867 as the output is a Gallery and cache_examples is set to true in spaces.
Will fix #1867 to see if this bug is fixed. | 2022-08-02T21:25:39 |
gradio-app/gradio | 1,949 | gradio-app__gradio-1949 | [
"1643"
] | 2903b7416046a00a5e97c831f5e52325d325a0f9 | diff --git a/demo/image_classifier_interface_load/run.py b/demo/image_classifier_interface_load/run.py
new file mode 100644
--- /dev/null
+++ b/demo/image_classifier_interface_load/run.py
@@ -0,0 +1,29 @@
+import gradio as gr
+
+images = ["cheetah1.jpeg", "cheetah1.jpg", "lion.jpg"]
+
+
+img_classifier = gr.Interface.load(
+ "models/google/vit-base-patch16-224", examples=images, cache_examples=True
+)
+
+
+def func(img, text):
+ return img_classifier(img), text
+
+
+using_img_classifier_as_function = gr.Interface(
+ func,
+ [gr.Image(type="filepath"), "text"],
+ ["label", "text"],
+ examples=[
+ ["cheetah1.jpeg", None],
+ ["cheetah1.jpg", "cheetah"],
+ ["lion.jpg", "lion"],
+ ],
+ cache_examples=True,
+)
+demo = gr.TabbedInterface([using_img_classifier_as_function, img_classifier])
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/gradio/external.py b/gradio/external.py
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -291,8 +291,8 @@ def query_huggingface_api(*params):
}
kwargs = dict(interface_info, **kwargs)
+ kwargs["_api_mode"] = True # So interface doesn't run pre/postprocess.
interface = gradio.Interface(**kwargs)
- interface.api_mode = True # So interface doesn't run pre/postprocess.
return interface
@@ -414,8 +414,8 @@ def fn(*data):
config["fn"] = fn
kwargs = dict(config, **kwargs)
+ kwargs["_api_mode"] = True
interface = gradio.Interface(**kwargs)
- interface.api_mode = True # So interface doesn't run pre/postprocess.
return interface
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -149,6 +149,7 @@ def __init__(
flagging_dir: str = "flagged",
flagging_callback: FlaggingCallback = CSVLogger(),
analytics_enabled: Optional[bool] = None,
+ _api_mode: bool = False,
**kwargs,
):
"""
@@ -274,7 +275,7 @@ def __init__(
else:
raise ValueError("Invalid value for parameter: interpretation")
- self.api_mode = False
+ self.api_mode = _api_mode
self.fn = fn
self.fn_durations = [0, 0]
self.__name__ = fn.__name__
@@ -600,7 +601,7 @@ def __call__(self, *flag_data):
examples=examples,
inputs=non_state_inputs,
outputs=non_state_outputs,
- fn=self.fn,
+ fn=submit_fn,
cache_examples=self.cache_examples,
examples_per_page=examples_per_page,
)
@@ -668,7 +669,7 @@ def run_prediction(
if prediction is None or len(self.output_components) == 1:
prediction = [prediction]
- if self.api_mode: # Deerialize the input
+ if self.api_mode: # Deserialize the input
prediction = [
output_component.deserialize(prediction[i])
for i, output_component in enumerate(self.output_components)
| diff --git a/test/test_external.py b/test/test_external.py
--- a/test/test_external.py
+++ b/test/test_external.py
@@ -1,5 +1,7 @@
import os
+import pathlib
import unittest
+from unittest.mock import patch
import pytest
import transformers
@@ -229,5 +231,15 @@ def test_text_to_text_model_from_pipeline(self):
self.assertIsNotNone(output)
+def test_interface_load_cache_examples(tmp_path):
+ test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ with patch("gradio.examples.CACHED_FOLDER", tmp_path):
+ gr.Interface.load(
+ name="models/google/vit-base-patch16-224",
+ examples=[pathlib.Path(test_file_dir, "cheetah1.jpg")],
+ cache_examples=True,
+ )
+
+
if __name__ == "__main__":
unittest.main()
| HF API + Spaces give error + uninformative error message
### Describe the bug
There is [this space](https://huggingface.co/spaces/osanseviero/testtumor) which uses the API under the hood and crashes with a super weird error. This works in a Colab. I concluded it is because cache_examples=True in Spaces, and the API is probably not ready, so of course it crashes. If I set cache_examples=False it works fine, but it's not clear from the error message that I should do that. I think we should either improve the error message or not cache when using the API.
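For context, a minimal sketch of the workaround described above (the `examples` path is hypothetical, and the real app also passes a title, description, and article):

```python
import gradio as gr

gr.Interface.load(
    "huggingface/SerdarHelli/ThyroidTumorClassificationModel",
    examples=["example_ultrasound.png"],  # hypothetical local example image
    cache_examples=False,  # skip example caching at startup, which is the step that crashes above
).launch()
```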
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
See code in https://huggingface.co/spaces/osanseviero/testtumor
### Screenshot
_No response_
### Logs
```shell
Fetching model from: https://huggingface.co/SerdarHelli/ThyroidTumorClassificationModel
/home/user/.local/lib/python3.8/site-packages/gradio/interface.py:286: UserWarning: Currently, only the 'default' theme is supported.
warnings.warn("Currently, only the 'default' theme is supported.")
Cache at /home/user/app/gradio_cached_examples/log.csv not found. Caching now in 'gradio_cached_examples/' directory.
Traceback (most recent call last):
File "app.py", line 23, in <module>
gr.Interface.load("huggingface/SerdarHelli/ThyroidTumorClassificationModel",title=title,description=description,article=article,examples=examples,cache_examples=True).launch()
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 90, in load
return super().load(name=name, src=src, api_key=api_key, alias=alias, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 662, in load
return external.load_blocks_from_repo(name, src, api_key, alias, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 40, in load_blocks_from_repo
blocks: gradio.Blocks = factory_methods[src](name, api_key, alias, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 294, in get_models_interface
interface = gradio.Interface(**kwargs)
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 420, in __init__
cache_interface_examples(self)
File "/home/user/.local/lib/python3.8/site-packages/gradio/process_examples.py", line 51, in cache_interface_examples
raise e
File "/home/user/.local/lib/python3.8/site-packages/gradio/process_examples.py", line 47, in cache_interface_examples
prediction = process_example(interface, example_id)
File "/home/user/.local/lib/python3.8/site-packages/gradio/process_examples.py", line 29, in process_example
prediction = interface.process(raw_input)
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 748, in process
predictions = self.run_prediction(processed_input)
File "/home/user/.local/lib/python3.8/site-packages/gradio/interface.py", line 712, in run_prediction
prediction = predict_fn(*processed_input)
File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 259, in query_huggingface_api
data = pipeline["preprocess"](*params)
File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 131, in <lambda>
i.split(",")[1]
IndexError: list index out of range
```
```
### System Info
```shell
Gradio 3.20, Spaces
```
### Severity
annoying
| Thank you for filing, @osanseviero!
I think the problem is that when the examples are being cached, `api_mode` is set to False since the examples are cached during interface initialization. It is not until after the interface is created that `api_mode` is [set to](https://github.com/gradio-app/gradio/blob/main/gradio/external.py#L295) True.
The interface class does some [special serialization](https://github.com/gradio-app/gradio/blob/main/gradio/interface.py#L709) when `api_mode` is set to True, which is why the space works for running predictions but not caching examples during init.
A quick fix would be to expose `api_mode` as a top-level parameter and set it to `True` in the `Interface` class, but there may be better fixes, like setting a `base64` image type that avoids some of the serializing/deserializing we have to do. | 2022-08-04T17:19:03 |
gradio-app/gradio | 1,987 | gradio-app__gradio-1987 | [
"1117"
] | c49a4264df17354e5af1b731550301d65353afdc | diff --git a/demo/unispeech-speaker-verification/run.py b/demo/unispeech-speaker-verification/run.py
new file mode 100644
--- /dev/null
+++ b/demo/unispeech-speaker-verification/run.py
@@ -0,0 +1,120 @@
+import gradio as gr
+import torch
+from torchaudio.sox_effects import apply_effects_file
+from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+STYLE = """
+<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha256-YvdLHPgkqJ8DVUxjjnGVlMMJtNimJ6dYkowFFvp4kKs=" crossorigin="anonymous">
+"""
+OUTPUT_OK = (
+ STYLE
+ + """
+ <div class="container">
+ <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
+ <div class="row"><h1 class="display-1 text-success" style="text-align: center">{:.1f}%</h1></div>
+ <div class="row"><h1 style="text-align: center">similar</h1></div>
+ <div class="row"><h1 class="text-success" style="text-align: center">Welcome, human!</h1></div>
+ <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small><div class="row">
+ </div>
+"""
+)
+OUTPUT_FAIL = (
+ STYLE
+ + """
+ <div class="container">
+ <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
+ <div class="row"><h1 class="display-1 text-danger" style="text-align: center">{:.1f}%</h1></div>
+ <div class="row"><h1 style="text-align: center">similar</h1></div>
+ <div class="row"><h1 class="text-danger" style="text-align: center">You shall not pass!</h1></div>
+ <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small><div class="row">
+ </div>
+"""
+)
+
+EFFECTS = [
+ ["remix", "-"],
+ ["channels", "1"],
+ ["rate", "16000"],
+ ["gain", "-1.0"],
+ ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],
+ ["trim", "0", "10"],
+]
+
+THRESHOLD = 0.85
+
+model_name = "microsoft/unispeech-sat-base-plus-sv"
+feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
+model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)
+cosine_sim = torch.nn.CosineSimilarity(dim=-1)
+
+
+def similarity_fn(path1, path2):
+ if not (path1 and path2):
+ return '<b style="color:red">ERROR: Please record audio for *both* speakers!</b>'
+ wav1, _ = apply_effects_file(path1, EFFECTS)
+ wav2, _ = apply_effects_file(path2, EFFECTS)
+ print(wav1.shape, wav2.shape)
+
+ input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
+ input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
+
+ with torch.no_grad():
+ emb1 = model(input1).embeddings
+ emb2 = model(input2).embeddings
+ emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()
+ emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()
+ similarity = cosine_sim(emb1, emb2).numpy()[0]
+
+ if similarity >= THRESHOLD:
+ output = OUTPUT_OK.format(similarity * 100)
+ else:
+ output = OUTPUT_FAIL.format(similarity * 100)
+
+ return output
+
+
+inputs = [
+ gr.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"),
+ gr.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"),
+]
+output = gr.HTML(label="")
+
+
+description = (
+ "This demo will compare two speech samples and determine if they are from the same speaker. "
+ "Try it with your own voice!"
+)
+article = (
+ "<p style='text-align: center'>"
+ "<a href='https://huggingface.co/microsoft/unispeech-sat-large-sv' target='_blank'>🎙️ Learn more about UniSpeech-SAT</a> | "
+ "<a href='https://arxiv.org/abs/2110.05752' target='_blank'>📚 UniSpeech-SAT paper</a> | "
+ "<a href='https://www.danielpovey.com/files/2018_icassp_xvectors.pdf' target='_blank'>📚 X-Vector paper</a>"
+ "</p>"
+)
+examples = [
+ ["samples/cate_blanch.mp3", "samples/cate_blanch_2.mp3"],
+ ["samples/cate_blanch.mp3", "samples/cate_blanch_3.mp3"],
+ ["samples/cate_blanch_2.mp3", "samples/cate_blanch_3.mp3"],
+ ["samples/heath_ledger.mp3", "samples/heath_ledger_2.mp3"],
+ ["samples/cate_blanch.mp3", "samples/kirsten_dunst.wav"],
+]
+
+demo = gr.Interface(
+ fn=similarity_fn,
+ inputs=inputs,
+ outputs=output,
+ title="Voice Authentication with UniSpeech-SAT + X-Vectors",
+ description=description,
+ article=article,
+ layout="horizontal",
+ theme="huggingface",
+ allow_flagging="never",
+ live=False,
+ examples=examples,
+)
+
+if __name__ == "__main__":
+ demo.launch()
+
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2079,20 +2079,20 @@ def preprocess(self, x: Dict[str, str] | None) -> Tuple[int, np.array] | str | N
file_obj = processing_utils.decode_base64_to_file(
file_data, file_path=file_name
)
- if crop_min != 0 or crop_max != 100:
- sample_rate, data = processing_utils.audio_from_file(
- file_obj.name, crop_min=crop_min, crop_max=crop_max
- )
+ sample_rate, data = processing_utils.audio_from_file(
+ file_obj.name, crop_min=crop_min, crop_max=crop_max
+ )
+ if self.type == "numpy":
+ return sample_rate, data
+ elif self.type in ["file", "filepath"]:
processing_utils.audio_to_file(sample_rate, data, file_obj.name)
- if self.type == "file":
- warnings.warn(
- "The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.",
- )
- return file_obj
- elif self.type == "filepath":
- return file_obj.name
- elif self.type == "numpy":
- return processing_utils.audio_from_file(file_obj.name)
+ if self.type == "file":
+ warnings.warn(
+ "The 'file' type has been deprecated. Set parameter 'type' to 'filepath' instead.",
+ )
+ return file_obj
+ else:
+ return file_obj.name
else:
raise ValueError(
"Unknown type: "
diff --git a/gradio/media_data.py b/gradio/media_data.py
--- a/gradio/media_data.py
+++ b/gradio/media_data.py
@@ -8648,3 +8648,7 @@
"interactive": None,
"visible": True,
}
+BASE64_MICROPHONE = {
+ "name": "/var/folders/t1/j7cmtcgd0mx43jh9nj_r9mmw0000gn/T/audiovb4gqjpc.wav",
+ "data": "data:audio/wav;base64,GkXfo59ChoEBQveBAULygQRC84EIQoKEd2VibUKHgQRChYECGFOAZwH/////////FUmpZpkq17GDD0JATYCGQ2hyb21lV0GGQ2hyb21lFlSua7+uvdeBAXPFh1upJLeC6SCDgQKGhkFfT1BVU2Oik09wdXNIZWFkAQEAAIC7AAAAAADhjbWERzuAAJ+BAWJkgSAfQ7Z1Af/////////ngQCjQdyBAACA+4PMpH/n1EPs4MPlDak5Fzh3pT23QOozrpMIemMucj6646WZTq/qWAjImUB4j/aEtJ08SjAyqjqFq+2zZ5BmqSKaDZJtE8pZRnh7pd/ez05WinXc/FkOyULyhFtAKY7v5MAAAAAAAAAAAAAAAAAAAKADzFGuPnjkNLV2iu/mGqmEkZOFkTDa9XGu/V+C8YKNhgXB0voRMsMX5rHf2WcKpFvpWqoiFsq5scEBbG0cNIjdGoU+Z3Scu5r9OMpyp0ETCKhFwi+D/g/ukqguM4i4rX7bjr3/IZCXAiOQ40t44c3thLsE9d7N/U6uePnhBMMh4hOCoQEL9bQcJHJpEL8EJsRPhIMhSZI9/aBmdmUAb56PS8k6qVyW57IMTYbCOJ9d0wjC1rwuLwUWeX6YCLfpX3T2QXdSsjThYFKwgsJm4i33Piwe/liwLaUeKfa4XjbkP5zsHX4C78gpFRf77q3Pg5bvCukbN416f+vQiBunXlcZ/RSdUXg9phF/TftxJ8NOk+sxY19g0TVRy2UMBV9uVxW9nFrQCLYxhOK50MLQDvEtRzEFGD8rvpc3cF7vKRFT9ObTh9vUx5pZ5Z0V7xngOWsz/DlxbzMBcRYOHeczi0aYAQZQqgnfAaNBO4EAPID7g2RpPsoN+j5Q9aclGv5D1s9CdoTT+mmvJ26cRh1bNNaI2isW9knZ3H+8hpVtpGfeLsG+6aQ8kkThDo84BlIX26mGsWfAaZlM0eJlPWlqxudzu2IFQXqLOzk819lC3X3zG4c+9EVLhEDepIDmRnjv6VCyjH6HmsJKeuZo/Lu0k/7RQww2vY/i9azLH5f0ew0XFNrHruB8MgFpwwzVxQttXpwhHTAl0B1zujsaaNX1+6vYSsv4DBORFKQiPYb69Nc+Sd46gbcItW11c6DcmdD0Jj8XOcNtjXKMryjRWdmEiYrAXVUTkZLsnIZJxpH3Dzs0V658BEWYfgNsrlVi2/8KaqOFpPXMyoZ4M1sWKtk13pRAk7xeQS0OqLKSkn8rzX1pPkKuONL0/vn8KKi9auAWZBE8+0u0JKNBe4EAeID7g3V/ImgFnHyflxJxgesfQU/hEw2cW/PTo6SRV89BxbmEbiiUEffK49yo3jalZn31EOX+GrVfONzQDcwz6+39msxgr7yRHJBXlCrGuDPhZn1yEg0nbQoC6cuaiocVGYivipU4B/cVG+SM/1JUZ1dOSMSi7IzUx/cIPxL9L329mCSn+d7e055zJthQaWzB35p0XbeLEmEDGf2xbm4Bt3eg0ROZMmKHC4tsVohbvjurVAhm31fk6KysYxJ3txAuMC6A6mpQMFmo9ADCLqwFP1rPFcR5+DNMCG+m4dvKSmF71lXvKi6kIVEP2U3KIsekd0GHY6W4QpybjUBlcIvjEwFMJcGpoeBpVZ5O+HEIONYCOJ8y4Z68uThakypsLKgqkPa4bvnATI6Hj9WLkg43nnLxWXFIobaw6mrpqR7+JuwtY4eL37PP1hTYv6ypROfDtonK6CtKUZbae3Atqgk8dsiYy6f7UXPmovQcgK2j6VCK+k24/T2rrkqjQYOBALSA+wM746KTKovZvocJZAogLOpprNkJuKrxFmMsLcdV/47iA8juYNVF2DA+W4KiFx6t7bflq2DELtamBLn4H/5wvv3LBStiTBgg1fgcO+p1iWuEg1RqSvLOVJE6oVZUrRxqtEWRewOCVDMNand7Exdc4rsjl+d8TMeMdalskYwKiDRTPxjIu7jr4sFGehIAL5bod1tiEOq7YyPdSliPnxRT4VbrICMoy80t5E6+2H01d2eReYzsRtuP4uqAudLvM4zL/2pWwH2wC1QGEEIiKkDFAbAYPFmwqKxMEzm+uXr5xnbMB69B6pyqsp+yq9cWoT96Oh+XMMu6DmtVN1Q/qzkUET8zrXOb0sJ817V2Zaj+0QVAlmhjFVGE1q72JcXE2+PN/KFXMooVaS2rXraiJYiXCsc9FcmRo/JVjf51LVKtyaxGp3syZghPwnyiNhGpbXCA0yDn+qsx7zItsxbmjL3eG6mwI0jkdxMhy55MpbCpqBESfIiZiw2IHXxQtI6KPaqjQYOBAO+A+wMaWBXecBWrz98jGAjM2LAvlKxUqbKiDsOE97P6bQkKXREtptUPWrrOVJzSgiTue5uAOfnKc3lHkixhmZiIC6M+hmmWc0NxW8OekQfhpmE+juG6BoUE3FTKuRPrmGytfqahopLAtWxxvNDgX4TaoqylsdgXpMaS3ZinkA1UvsYQPxc56FIj4lFeF3f8ea39rtA1JzZka1asIQJl8wor2zoRzCW6+jX6anhLKEBjCuPy7TwZ1ACCpU1tw68DvFN0nqNpAsb0QdYOst2y8CjU2QshwUQIPLMhws+PipOdCawbkX/VltWSl3DGmJGx88lRf1AsGvGmykCkfuqXkTbVuUPeuFwHYNKmkcUs99U8aYYZyiOv8BjJzo3vQmYNAIrb+EcjUIlSE3ecrAVZv2oBGY04Ntf9oFYPUGWLRvvd8UswScVxAFToUISFozdpgrfZwWtYikqw8sTkxZRI/YDXY2Epk2O8w9XMVYdxI4FojNsKQXpYFnolP5vyPdmN17OjQYOBASuA+wNPuyhaEA457BBCiSmcmDmjbP5UFKpdUvdWLRXtxNZpxos2I1ZK+f0xmwbZx4Oq5hBWsNBBwdsd9zReiOwY/nl/gUEUynWmfNvDMLRfwb47JQWL+kqgDLRN5WPJTXTpyXvVRoI4amc7Wjbesai+EG8PhcpuABFMJjNbcU+aGMJuT7rfb/PeAuapGwtQefLOeJG7ELIHjqe/Ehizufd2dhXL91M3E5syhmGzdrP5Qox/DKeQxt2f5QXr+S+YhpoHbzMI6hCSPBePzb3hdbbZ9kbabpnWBWZreAsINDgcwV4Yjx87NpZ6ThjvpqFL7GniPcqU3CAx5e35PXRwR1DgkSIqi4GEihWD4cKFWzDrxDAf4hSvvGLFBiVgu24oaJLNgqmBTunmozN3leeRDGK5RBq8CQ/1a/jPQxpKJqwP0HvfM62cutODtObEl6hOg9+MXSb5h9JYvABoo3oZa+WYiWCBl2z7WnAFN7fJsjteYtuvDUON/O9DW0v2YzNdTNOjQYOBAWeA+wNQbXIGz7NpKk31vLNIFhBPBHrdfP7xiV0usIfr3zJa4B+VymnG3ytGfixcorNxhKpbCs2H1cLrWjhSM9wcVdcRSWfQ1T12E5KV58cWTkfTEF9bW7H8cXhlcSvvgkjrWaQfIx0eA74JVzqFXx6BXdd9sZXRRmaOX8Ad+m
z0fu5mIlwJW9KSk/M3g5W4ZGo/LslHWpPLfQo+7OPokpNR4WNCUdralfz7TBza7XMaWCGeYnUYFLf1POjtxvzdMgMMxZ2pDcW76i4k6roOCGKWtjAC1wAE52lir7r6YUeqQbT8QMDFeIWHSOlSVZnmrgMalzfW5HB8UEDMnWsXNYYMGSJKffDXXH2rBb0GXJg8mYatPspytQUu5xyQOWJddWkgonoTU4mFWUSohuUcW2cpKk1rpdJpNKod0fpH5RyoZnAZZYXzeQeLA7sJ4LwUZ6OGwj4ZhZlvWxJRkIQtGJX1jgsyKAVToAwrYr5lI4pTHnj4bA/yiDkCjD/q1jeZsuujQYOBAaOA+wM/NZhxY3E0H687M+siqrTCmh9MPREIILn/hrUqspKTCRXlMIJ/PZeUsDAcyrRgWHR7RM5ah/IvKdCsJKLU5Q1nMGESjH90HaNBSHf4V/Fs+PVHqZdKbA9tt2lZJ3TINcySP0sw+99rHZckGW51Re684SKYmIZm5+1vxKGrdGImUXBz0zG9xkr0kutLvq6RhzvvYhj9orQvovv3/mvt6yQAXZ+Pv2lgC8iQXN0Y4/HS98zUWoPOcZklWrCt6dUB7JI/P0xNsTExjF8/wnDe255TT2uR5NcFJI4clXPaDVcUApXdBa0H1NzIb07WHX2nHpi05c+PYN+c65UVf8FnND8gDjByXsYy7Iqz8aSmIKULKM6iPi8GbhqkamKHLsTXIhnFih30L8HIAjhnleY7FiOxrIukUt3K0fXHWVVpyXklL9J5u/nuRV3epKbtTncXQu1MRf2S8vkYW2GGgX5xCBwoOwkESScUf9xWDwYqVz+VR+Gs7DKQWWnarIsg5XqjQYOBAd+A+wNAhhKTNez6jmto2HjPkkOFDiSfmZnHDYtbOb1vTXN8Rbs9VbTdLYwHbw14DpEljDRsQCBpvaAQQix+iBWCixroQ/dJkTS/2KnYzFOhlKaIQEffrhpW44LQM0pTabthfXVQit1fGsCsdr7zPOR2mrlb5ccvVbHcriovtP6lGzuWPOBqqQnuXKLkyPs6Y0Qa+9gAujc+jripZJKFOYlA9MSwgliyTOJbTkfI2wlqqTKKoU1bcZDQpp5Ye2Er6GaZo7ZGVn1gvz9lDOSMCMyr4Oq5y6Xktzw3CGM6UGX7SXMAOtbt2RjPaHtuXrAq+0qoI4+WbXIiscQqeItSTn4ikSLFJqymv4xvxcJQRfJB06y7ZpT3tx5A98/F/qDo7unBCn7veNDgQGQLcmimpW9SX5oQraYkndGHvNlFDSDOAsKOK3IJD7uekmUcr/WYVqArzNBwTrZ5tFQuZ/8JQo4xwX5Az3aG1fSMtG0l8i7jlER7MCybZGkjIq6MT2A0NbGjQYOBAhuA+wNETRKRPUv0GQWKTjosJhcXb995F1P2wm2q2Ol6kvyTxdCbaQL8LszJISOeAUYQhoOfGPW02CnVbW91T8PMnnj7qEIxbO8RdQhqJsTb1Ssio7Tu3Pshvnendh68/uAuB6sJywkAtWlsQAhOspjcSb8w+WY7JoHJUml9yJ2IUDIvQIEBQ8u1w500gsyRVh5cwpTVtng7jW12zb+AUriGGLmO3ut72EuK3uYtFNSInpI63kW1+poJ3e9H0Ejy4CDRd/76/mtifMI0l3OuTR/a+IIoN5r89222HTkSKLS587VDvvfyoKoj7IAlgQsjY4OqQYKsOFH+dVjs/8KBkYU2/T+Ruv60j7K6zURZ1027AwH5Mzcaf0Vv22hzoIuVhUb0UwHP029fsJQnlqH8hWzzaPcBmPreenDXWsne0HLoKsB7OX7r4ns/IHscX+MVNWHCYRumXwrH6y4ZS+nSgZyG9iPoEfgEWEloE9Y8SZdWh/9OgMteGZqteivn2g4rPSejQYOBAleA+wNQHGwm4JvyZW23Pqd3njZ31QMzuGuZLxXuiRWl8JR0b3PfiNBBxRxv00xBhQS+VrpOCeMRA/YdnecYyI+6knzQTazpTHGxU6S3pAO6elaxcBswmYTl+hSlcg4QXIgYEwCDEdWTpSRi6ALl3vXyvsu5Km9/iZnXGlSv0jM0ho8UIuwzq5dXAUJPlrXg/hAYuZZc9wOkCNhpXdovJHXFnDzAs+fVYYBmghzjGCPXItR2w255cEWmnLy+U0Sg9IOLRGr5lvmyEXKaNXLKIWUdrF/rK91OSPrQay0Djis1tK2xdIZLTvDVlr8K3IEKoqJrBUzAGHZo7h7dm80vlTnBGU/21CfjaMi9JStWk4Ua7Q7b5qp6+5W2Bj9fpDZ2Ub1gZOoTn/rEUVameFjy6hbIdRt2U+XvAu8wKERAVzNRgaa2DhOL0UKzZg7HHI5IZSMIkExBT2ybFrDRog6lJsT1hAtcTtx5Psz+IF8UpjRi++WgvIr8iO2KhCA3AzvtpqajQYOBApOA+wOaoKR8kBqXC+u69TtLyz+S8831alq62o+0U1GEKnfJa9AtlUNR1nZJpw8DlA3QkaXVGagRdmsEKP/TKwyWdvkOMZbKPpr1Z/4mNfnjtkU7jWvs1q3kXzrnFlRFyjlmdoMt1A0TfhRxQA12VFHu2JJE2grGlKWSYKvcluKbJHE1JNagDp/qB+9lJvxMJA2kkDBQQfIR0mtpU1DTHEK9yE7fyHvCwOiyiveTCshlsSJ7WvlhHQx2Rtn7qjJlpb2SyOaNFJ297nllufOLenMk1kB4blxu4DnSg/g0zdmSGtwR8RVk9sQEiONuVJZubqKtiX/jpEG1CaUde6+FzNM/fyIvDhbjFIjqxPdDYLWZNl3l5gCD1E54kKXeUMe7eDToWhk+0dGI/4XDIp3pI6a0SbsWxNk09UulucwiCZaPl0MenCskrh26NQ+Zd6LJsW6JfD79si1E/GKhB3LX0YcYvY/2HD/WcOcZ9JzNdwG3KMf1zX0OxXBrORAg7J7pQnCjQYOBAs+A+wNAf/DMyDZlK6zqR28ylj2JXQzg9e4kK5/vL75PNSiMO1tdchiii4UVc6iTjfYJXXjqG73LpuuQ7T1HtWj4u6hVQNg6SZts3qxlTpIjuWXdVMaeKeYc7x/DGPG0S4DVmC9U+z9IF2icsvHHxF0BoV53aC2jdlTBcV+vw8xeafm7QOrKTmL7nglxbza94cJtcaD5gs5Vfrwgoij71pTNiyZ9iDt0I3oLbNCAZeqMtSbp+PFnK3Tv+zhx2JKtM7PrUyHTW3qo5LREn+G+7EBUKmCFmtStGBP72FBROCzkZH0TTv1U5Gqz4JnPj5YBfx+jkQx5jznc3p1ldEZz6ysYl1GXN1fI4CsGygqvFzrLQAn5x8o9WrgtaYQxEOAWTHK1Vp9x1+X9EgA7RZV+9yalHCaKjBjLx7iea7pju/muJ27jlKygb7W2t0rj2xXlVJxxU2KXSn8atgwt4aGQBJMEavLgDP1Z+Bmvlo57X9DnTLbxP82j2chb6T/TcafjRu+jQYOBAwuA+wM9aYQ8fhQxZZsS2xCi8dq5DrCTihUpnjchwR5VGlVhZqycrEkjLIsJe6nCBs7RdeOKxphz4n1KS5Ftc
YRUJeR7sQ2sDW/NC3G1h3qyRMIj9A38wP6FnnVZvHIy0jGzgUeh9X6s/6tlMscE3fN1+hZaeCq6jD147dSsrOS+YW+NPUEjw5WJ33BOp73DuqlxpXeegP/gPFS52aZ5hZ7uz/WQkJ4qAgmEUb/J0iVdRXzO8/0XK00qq+Rp+cWLZLbDuoYHqK/xg8aMq3ZN1iQ97/TLkpe6RX0BI0ddUoiMTiHtlbcSf1KUAwQfGsUgRTJNIxdelIDzHS17DbyG5COPSRpKYWC8f4zsxoS8jHzdZE/kKUA0KIUP8AYc3qrfrZiLPdkbmqKn4ixlJEdnbPTF6IVxmCoeR1sKjJGjwWrUxCIrKDiN8K3viGPgsbsHytbfffzf6EEeUYxkFROPx1SFMgODw5GsnOcMozYrg97DD80a+DMr//dEjV6jO+IujEijQYOBA0eA+wNAdJcvOohN2QTQF4F/DpVelPfdj8pYus9E31VBsUUGHNaHbhjBqeo+/D2MI6AQ1NOHUteCsYt7dF7NIWx5JqH/uL7whC2fOSjBwHT5oPw8ZKfXIUwGbk5J1RZrdbVVfaYwJViuAeqXs/WdUg/2PD4gT29h9Q5fpq+vhFI1BwPaPxEZFtEv1t/+K7fNrmhNBYG/30bsBKVHbw5AmrSim6Dhkd/pGE5RG4D8ecsUvGlB+rnqACTHzs7uxY0gdTYq2r4WH2P7DeXqVcMKMWBUG76hI6IGKW7vBXNbF43Ap2vlJEmZURzB35jl5QkSbE1owbFLDHOoyDb+YDt08HeSKkRFgxHjKVAbSWeGMQhFDP5v9kszHwCCUnKRkpK/CR2vIqna2IBO0QsE49PTjmFBQ2plpBuprVOOXymr3jVsqy7902HVHr7rUfE28Nz3/ikOuBtgGy2KBk/Yxa2ksK2rePpck18oI8h2uYpt0wnaurMeOB0X+hHVZE1O/kSIBvSjQYOBA4OA+wM/WaFrl20Ui032X9rmUgKVbM5pprwG4iPi6fxUJg3gmiRJDgFgneXHJplCRLCx+F8qZa885m/GPHCqot6MZN8BJDNdnquocrEBezXh0haYqkjxDx085K1fWwVJCkMyCRPMx+KUg4A1XgF3OqjgWx+VHHj66mq2F0k9otZ0UC5qRC2Qq51JhgRMAJqQLtU8cOb08hG+QX/Yter2qSR+lLoLAikjQ+QQUOO0hCJuXA/gP6SXXH1dqLNhkASFpvbKsosmT/QLiiRZidbJ/6Ct6lYyOG5eP0lYRjrP6mK6mnOaKuFw5tLG9qxKw6IoeEeY7WI+A8mr94Wrn8kl9bKTsjy+zA+C0SBq6aUzeZQn5OtzH5O7h4u9MPOnAylvIEjR+bdWoQlK7FJOuA77nR8NHrb5bEbKMDfR/aKB++XizUvI182P7M6AwP8Uhyi+Hajd2qmBzGeN/iays/z3hP3ZPd7z45r0LIXw7H9zZ0UcxkJgXPTFbg7FjGACIo3mtsKjQYOBA7+A+wNA8LZSgbInqd+Lz420l4sGZEKHpdRbYp5yK2MIkNvrRkZ6tJKIJIQnGKRoTHslyhhrKmuGqWAwT3PuL33CT3S2kjXU5JzvN/lJTK7clyJc1PunTG2+ipQtq73aW/YNNA4LvWPLL1FB62kooYZrrLNsFnF1k65HLRtPwqZP0fhKIj3V/eQ31fhNcF9ZqINrTnZy7pm620I5gqXWUykwFgJUJh5Lp5G0I3pJu9tsmTVBLs3ArDnvTc+aiWyVCQSwZwaMsMNpQMg9opB9aP9+mfa+fqM3uDqr2+a8c4m99ZCLLaqWlFZUi1uSy5bGgywJVbwAhYd7W5FU+7WVp5YLMEB0tP7qYg84kzz2tF3th7hQ5gMqJEMuSp3yOWiiqCFvC6k+ydaa0DNsJ3NnpdUn+hmow9CBLHREnz98RUQtm2UeiINGE6Yo7990Fil/jT14QAroZVgwYsATUGbFO0CktdifhlL4HmJKE/nVhVimji6WtLzevBmN2WDj32CfEaqjQYOBA/uA+wM/GMfyC+5QrcCefekrpbSeOkVMpX4wlR5dXuW2BEgceI0M/cUHWYLuDuS5B3FLerjXFoaPf/sm0zQJ543mF51/Hrl5b87/60bg9id822D8lhIt1Xi6ZhPJE0DiBP3Y0vFsvHhMvTyBfHHJaC8tRcZqj2yXkBcDZ8VsPW736sGiUZeUhHEj02jU4v1ZaVFhzsDcl2pd5EjcP3Gtw6hpwDongj6HAPvsbR0XV4zeCHSsKBEDhRL1Ct74hF/cfl8KP35Q46qnDsp6mNXnIHuKUYNHOcp/Tqhn1WjN35J/Hi0BnArFIMZutnohF3k+aEIu2H4i9XLPx6CBcNK0KRZe70A6SU22uucHcuWPCbjzRajRFJmmPHCO4/uKLzrClZu0xMnxu9OBiCcjIl7Cu125NthcX4nbGZeEcq2vS2lzKHQxUbhhtyf/OQs+ZLOoFaUw1lR3HHSA6Ksgh4WrpUElDOjkJjU5+eLzmcFj446vVazES2L0oKevLHuWc9ILB96jQYOBBDeA+wMiSCbZHA9+efZLryV1YiRqC/a6fq5QJR0NtSmHEk23ZblnXEWRZndLO0FAoLYJJx/5uQF8Zbf80zCs6bBiEZEXIv4c++XW2WnGLPgk2ytQ0RhQLLG5bL+864LO9eqJjsrk30BRZcNKndmbmiZxvZ1jjlZXEPREpMcPiqVrw2rpPznmy0Z1c3rfheURzpc5xstDcbb5y4cDG1K1orgPVrd/gg56lfV2IlmforFNn03Snjh8rblmoe9OHNDYE7xbMD9kNnnPApaWhnNrTM21Zz+1btJrWpRze4LamvAcibKO5TyDM6JPpGiQM4MUknWmYfeSx3nQMUT0r83s2zx6vURBIHZt6Fbp/te7HKM49nraW0aUIPUgavx8rpp+mbLxaYT9wjQizg8rQnWXLoDGbZotsMY1eVAS7gNEgDYSWs9JRQtkI+7W/+urYll0vwWHcQfQDyhid6AHNi4+ahH08V3uMzcHEuJOgT4eX5Lmjfi/KtCbSD7/Yz9UyAGy5rqjQYmBBHOA+4N/fz8RB8z3JXt7cuc6lRNqlHwU83zLL7Xg/9SG23471qkWDLgZ9j5chWZ0Lk5AdsjXtJhZ18zDp/js8JGokUvYIf69qM5M5+C525eMDYu5IgeAYCxCg6o8/IV011VGGJip/km+ABdL0p8Ge/fABmFhBgLrhhuRMMj2JVxhZ6oxwp88RM0y6EahYfTbxpnQf7fm6PW64BmszQN0fDzSvP+qwjiM4Qz61aPDWuIMJsGH+C/iZp0f0q4/7+/JilvwNZ2hpSmAvVLVe8V8vSRNuMTEws1kEIKl/wPtQiRuypz4NmT0ocfy8Pc3KagMHi6fhs5sfNutK0p2Xlh/XBtrepKchKMVB+7w81CHjEXgvLuII/bol3Aqnz3+3YtrTCusCOgIBQhbcso6mrWVO1XTW/3tAkd2qmj4mRdXNetG5bU32/eKUaIndB8188ePl5ospYfdaKwtcdWS0a4srFYd5ga5Ex6XHRhW8AdjJZf5cIt2WGrjctCgFYdKiiztpCd4Frbu
wkCjQb+BBK+A+4ONlvDO7lzRoItQ5Rg5I0uSMCY9+7rEDz+fgSqZXUvkt6FaVBSh1X17J8+EBvOmrk+/5wfBNcFxDSohPxn9Ap/5NFum46nKJQbOSuy1dh1vURHujEVzQpj5GcKjuH1BeYin+Q8sTgbeV2+yCyTpjuoqRXOxqxBO5ZHD8mxhfVLkhTmfPWYNLH/w4ByBheCoO+snEBTcf2XuInUprKuDY/Br8axWAirmjcW8cqNzQiQMNoCn3seijnjZi6di6N4Ra31Sx24iGh3hka3ZQKZiaMlXsl29ZdqdTWOnTVaP0WUw4hIVO2h5X7k8ybRxU8+dufq95zxWG7330cUpzbQ+myMs3A4o7Bpr3VRBStmZifDde0oyO/u5mS9pepYkIYpc4rjmyZFGQurduRx6fBwyno4wlKbwH/bR4sGAkXiO0UuY9+aFDWunnnSt15n2THINrfVRZ00PDnGCVPnI5c2CGjqHkChNjHykoTybFQVPW0Xp/v9onsS7JmLMzi19aJwy0fbV8t9POxiaDujYvbyhM0PNx7qsFCtHExyZoxlu/KflZM+xeC0vgzssGfM/Yrx52WKFaXujfC0pCkGjQcSBBOyA+4OUle7V8d+del1dQ+AfX2kTEsQtBgsCeGfBhtAlF0j/UBtzzLI1WK3/zwNyN5smy5jewmtpVfEAxcauiYrCQN9nykXo2ZJ80bCRrDn6oDTmkZ88bU5DBEo0783DMLe3nOgm9VwPGVQAe4ufmY2GJWseAvhwS7oRYj4CluSmVi4o1JnzZD0qDNceFZGjjJUqVH3YLMAbmkLq/qU75EMUTjs1F7gbbOu4Q7i3ALoB/g5ojh4dxomJd4Tf3Jz1WYZ7nH1nVc5y19IipVH3XZygYOZ5Ortgxc3SiU07F2Kgzzb8vFDKbEX6EtUC+aalLmlJYfQiD7HZLfvbzZQ+buL3BeWy35dNXd7KODnKRhWjn9Fam2TdJJ17nLEV6msWYIlBfn8moLSbXQJxb6kKRe7Un7Z1wcvXx5TajXNp8kZCz+vlCAFuj2jeMuWVL6i/HsJH++CPopyAotLZ1hHyq3HoDYnQjI9aF2BktGJxs/M1W3xh3v3IvVvkgBlLyQaAZrokJ5AnJv8x+1u2dqTKo46Dbofs9SevpdiZtdmvLNmmhApg5sQXEpKCXTeOZeKsvFQGvmgOuWNaOPv5t793FQUKRqNBjIEFKID7g4WA6tXja5c1OytvkgwT63HOr7vajJ94r+F8YUrRSv+aZo1AVbFlO3iEHp81P7NR6Xg0lVwicDhBoCPfvjwDhw4gNtqXuSYdrg/oFdHcUYktX+9LgDRVV8EhQKkWfrq/O+uuXFYYdeTtJaM3LD3WK3jHFet5NE12aUw9aauVDaRTcS+Y5jp6Su7UXnZ3o8Zy9yWLTG+dka2kwzaKrnbkDYe8n0xz5v7JWUrNLhFo9AkKUuC6w+Vx8wIRmm73LsFpyJkuEFwF9STc0V1h8cjmm2mDp6oqEiWQdqXArDZpFPVJ41VMylcOI+lPY7MeYe7SrbRINClq8tVfVhEo5kjUKCs8CBj5B6RI7sLKPRapa5j5veLdkNwR0QXfE4HH9AXTHdlswAl9r0MRTjTVdkOhzF6SAwJ2+FxP3pTY2TKolhSchOx5Auxt/WQ+oG4CuqU9TLt7lfoDDOD7Qt9rOKJirGWN9SE1no5Z48pct7kHTm0u4jlFPFkgwemf8eR5v6gbdAOu3mWWS6NBh4EFZID7g4B/7pxmFStND6gEidN5ZQO6VnEyNe+JFaAH9OZNYG6G/52RcFcLpBVqElRkSDKvUE8kTeGCnkTSl7cvBvodt6nHq/Z80Ok1lcP5p/qUo2HQEufDbWLo+LjNxKv08PI3N/JvWb0fYwmVFZCZvvd4c8mT6Rifz7woVyMpd7mNZme/hkrqruPvni/vgDaTGwlFPtYOEUZLiE/Sfqg4DCC+2cpx+2zdriBe9/0zWviQ8FevnH1ycYoM+NMPo5D8DG26OHooDKgGI1k22yF4DPhFQJ7X7Nr0P1DwoaUUSMWFGrHbF//TRWHTdHw5zw6fYlDesCoef1JgoWt8Q7XcVAOoqzhP7f0lqs+1Eg7aGssS4Rbx7w0VCor0qeRYdNb/M6CG1qVVLRfl/VXUkaHXLovqie+Is9hwrxWDpk16ZY3irt2SBBnHlxBuLVNoed5GJhi88dnpEiOMYWyY+teE6q9EcoOjHvzDC7+Nff/zAx68fYvMiMm9egcm89RSNVSJgJjtGFejQYaBBaCA+4N/gOqup+c0l9fkaHVxu/bZ+V4EBVrSlZP6echgc7ERYfs2KaGXAjO7pzArdj52MNF29CJc9D52E5NNprs/U4ZkHRj6Mw3yua8PHZ3RNcjkU0hkW4g4GDRt/eInB2ZX1eq1j13algzi5iv79bHvxIlXQBeoKfFSkMyqFjl1k0tX5knuN0hx/Ifa3GbPMeBqFN4evxb03+8y3IWTTzSt39Tme/jnPopL/5JS38XHwq/5nUcYGai+yaN/rKN+2ANO9255DJzitbREO5XAFs5qzUgHpPvgm63cY6q33lsAtTYpZIdgMC6fZEIXLaogDZKFJ/uA6kt+/a/Uj6lCq7NHrXIWT+rpJocJmUo3n/uAb+pLHqE3wykjfdmT5yHCmWxNQzxKH2LCV8eKPwNtzHLjSJauWAplJTagql4Fk9BQ0p/JSztBM5Cnw9t+FONDNfMSFB7r+3Tacdv6PpNcZHb/wYjQXqONmAbxuy67c6TvVsf+XwRjMVnvDJ+rdpYVMyb/+lWjQYeBBdyA+4OAf+q18mBLjgEq+6p75VGkt7LcuPBEXVAptuRMteyUWfaMTVzp5gvO/uQDiW/0KrswPdgpSYdFqlbkRUgamIkWY4LN2vK0gnX7D5I0IMnItVatxQkgQL1zNVHSrgDlxgOlPp8ma+rsS74DHFH49bYl6p/WIiUR6ad4KRINx+8yK3pV9K6D7TFsE5ILROUEzhngW0JlnLPTeZb+4f+vyNDOF6C+ZYbZKoEx/64KfIw3sWOp5I2Oz9WDFXI+YGy04jYKeO3JoG8i2m/T88XYkffO1lImX6HrJsrK83CQI1n6XjSq7+HWzh6Kjt4OoDJ24K7pYwVNFjdEy8e5eCMKXD1qXfScOjcxpfOf1BHx8m1LsLU5wv27Y6Aj2wXA6oUHw+JiGjK6c911SE5He2R5leC7xbuEKEGymS+cfl4tgSHFcZY7PiUmNCe9IFRllH6oBfbuJkZZuBwVnnF0bDHRnXo62tE/Ku2Zqm5vPyWufbG/sUzDpD1XMbMCqo+m/4hpXKpfo0GGgQYYgPuDf4D0cktJTWSrDV0YJdBji87/cwaSvfyIUOdhgfGLZ87v4Po2+/doUWJxY/bm2CvNy27DI4UEJAisyalvwEe2ukEW93K71UO1zE2oQVGJn5qtKPmbkkyZnGaxXFyAlBovRm5XBtKKtvB0qjsCdvSJxnuZ2bfxSn/tV/6r5q40ywpf61i8jvrhANMtlq0Hr8JuHIOYAtzBohcHBOiQkNCpf2dgQG9HU10r3fK
W+0EE+d2cV0FanuyZxQallDTh6pT69msMYw18gKKVDgugkS+a7bCShuuid7+toWdmqzZVuIcckm3LR2R1Lz017UAJt4UiROqoGVA9FyRVjYqtcVmX2mD0pJWU0gdBUxFsQTqES5GjYhR7eBeiV3wBAOCcq2kFZKbEzZ6tT6l3LTqPnuYF8hHHAl1CfTa2K/qJ9VUxUn6ilu3m0X0ywwXAPK+vnin8XAJPSOT5meY7gV/GtWhmJGgvGSMbBhqkv1oX7ydMeKXAUDBwFTZjB3Xvf6v+A2pko0GHgQZUgPuDgH/rS/Vxw0tdFvURGYP4KsErhCNQikuyU0g2dkhrDJglQKu8diGnIdoDX1cvV4L2my1ZJmEzZrcfSnYxjL6X5wHVNz6eH5n5YROxvAeI3gFhoPlgvVQOvygg3w22N6nAb7JQ0j0RkqyNQdC2nmrrSpasXfU9a8pmOqu1dVMYe7I6YerCO1O5OXTNsH8cyGdXe1d2lS7CwE60SfXywn/3stK3iBYvxWVIHA6SpVSk9HEDl2dleuFUl5DyJ0/au5KxJhTPQC/J3xY4Sw1hV43WNgHnlESTmGFndt7nvyVgET7/GPOX5mi9nlgm5BbQzT4iF9h9vUx9NpOL+s+rhE3I2GDqr2iofoW6TGp65hLCyR4TApzN/u8U+KV5oDqaqBpF1QA8Ur1Ye4HhggDSx9eOpnYM5Atm4VXePmVWrJv2VE1SZ94gUc1G19d6Ue124vHTtXyN2+oTDlhnTtH24T0tsLrG2rXejAhtQ5N62KLkR5KZEy6ViOrWeEZ9b6KbLLV4ZaNBhoEGkID7g3+A6ve9WfYcwIlWJZW4E7iKlf9pCNn+DPO/7SAae/M9XNAqfSF/6snUxltZk+HNTtetVuRfOCToIanz2tlXMbdj3nZg5dFpiEM5RrmEvIA3rmD54jGx8/wFg14bA2s3yh42Rb7EcZ0e0lI4JMBux8qFuPwaa69WGh/3jImklD1YZex9DN33dJCXZXcIw6n+JuI4DSwEkv1AiF5UvSLOXIhzMjHS3YCjPaOA0GF1RehpvvQGANBAe2fUxx/7fAZZy9jz585yVGWvf4s7DBiC4qIgFoKeWbjXiW6AGhLHEzIhQIkAsAWDIhJIam774GqBRt7PHI+mKzflVLSvhZ/Ugdhk7e7BViVbwFZzFKzFhsTScIKaVns6W8fTk95AbTOnULaUzR6kkI8O+fYYNroT7uk/+ZpvgRvLxSfbjutx7O/HGgOxTI0SlDfswJrnVznVCgtctyTHszpO1MTNDv55M9h0kGxIZjMlc+iCBuIXVL6wBkneBNRKi1UX4q8XFsIEYqNBh4EGzID7g4B/6rRpBLBG9xLgn5bP3hsSXip1jPm5u8P13LqMxJaUHl1Sqirn4Xupyj/O3bTncsVl8m/SwZNt94x8bwYSyzVxvPgyZPSi20HBDZ6gGKY8/7WpzkiXMe7/hrBVyrovOQaRYyQMOJUopfqwsr9C8YhzXDOUjNxyinVA0QJ/0LduiGMnWuKhmLApUPTwnqDAXg6ZD5ZtcMNSP2McBVNJ0CYhyNJa4BC5PgsfvxdcFbER55xGhkZ+gApruGcYNqKC7wWXOgpAeoltiu8oeL8WXWIov/Nd4Vkg1iOot3mG//4HcPgXwH5xNv3ZpT02X8v+CXQj9+34GzoRPbmZXSayJMMxCmB1m6pFb86GfyKaRwYoIycUCAEiSKUHqub9ijFO3ftQFad4iS3rCphPg4+l7k8XNqnXw9xaDVU9YAEBZUW0e5t54pdEeEBAbnXQabXrAAi4HZanhUfw9096oKO/3aSHbpAueZmD5IeGKoklFfZi71/vIl4SoJ/y4T/Kzw5824ejQX+BBwiA+4N/fe0gE9oDzk6pPWticJk/R5FTjvon2CHvSq3CR5SL4kJIDSwtYpPjzDCvNAmAdGGkKYRtYWF8l7GuIkcy7/S0cMqhKUrLVeiJm7AGVgLa8jK9JS79Jre8BDOsT5df93WB3s29/R5NRFRG+N8K0Hw9EOnxxEIeNUAREgLfMB1JVkvuss/QXJZ2+ZMBgO5Q6HxwWAIZacuc5DXGjtpb8dOS5Awx1445WwtgHItCQF4qh/TpOdZE8UbNv8MFdWk+Y9r5vDQ+IXHseOal2HpNoFBvw6XedhtL2ojBLgKS68Ov3P3tZvgbF9cSQu0sNVZwkitC1LCtI1P9z9oU9IyGTusuYXf8N4MdIq+wRyggQ250wd3FE6BDZJsAdEZCgw2WdT62Rki6nA5jo/tycZ5WF4z5dGpQKQv7RSaVmtCqaA1eZJbaMqJOq479Yr99l3oHjSpbQ+lErD1RdWkZeJUJyLNX5ZAdvkfRDUZxOP+MulWhINSlPwTneAsGUaNBYIEHRID7g3Vx7BWyG7DcH6AYG7Q459BzUJ2ZG4HEC4noLN2b1d5/SBZsKGcLn0/8pIv7OdNKYDz7rPLVgq1obd9qn40C6vNxSeNK80rbaqqZ1rud9KfBx/noFM0UBImUapGmCyOEIpUeDm4DJF3PrftupEjQaESe4h/CC3ZSFRTudVfq+V+BKHSr1z6BW6xyxzVX5uD52AJ5+lCN/mh+NN5Mf1X3AfNOsOqw5RfMXpFW4nzP6fAgbEoFWeJbDr+6xxa4IIq4i96/wWCB1oaZlYxU3VP4OMU/SjAsjvqeflmF3SlBALxFuntKp/Ta90HsXFzRNorF/tthsDuCKOgHqPC1IzgqZxMcwxwGXZHCQSvhFsvS9h85ruvmHOL5AewDFKxegrQPQ55I8SWF/pSkMTv4U1dKv13IkZSpizZ5aOLpJ8WbQp1MFvWWNxHO0cXbH283pHZLsKyQCrOw7cxcVD2jQWWBB4CA+4NzdexUs9YVJOPdr8Rja1mRLN/WQYwMCcarET9xjsD/nSC477CKcUfkhZG5xodOb+Rsz6K4TARyiY31BOaCZZxhOCDn0KCMLu9TndVasMHgetYNcaHDP6cSQ0p2eS4OHDogdAVG67D6WK0CA9T2ipy9veZRJFAbKiRvy2k4+7oHNGUGzu40/azOsKd87nfqN/J99yv+GYxQ2WZQeJ+vRbtFIYPa0YIwuwk7mEMug3eOjfqHTFNA51r5tMy5sZlxDMWmeh7x07wJcDdt3cTMolRLXmBb3jTG+t1UgiJ5Y7HWaFqHaJfiojj/46zs5FhU0GLeXe6TIN8HEJ5L8JYFwqHs+JI7L4UUUXzYaRQn+IkVZXTQat0VLqdbQJT/z7//WivKxtpsHxNKi6uKN/rZ9wRFXiCnsN/iVj1zXPcQfj3enO5sNtAVstcoJNRhQ5LAqHNmLxbafdwE8Z25O3O2A6ijQWiBB7yA+4NzdgmdFxOo3G3yaW+oSJaQ6Dmx75E2R3kCpEjOhRiybt20XRU4E35JeuQxMmYBYQwauGBwePUB5KvqAQjx4IaEdHNY9ntqsNciJa8cR0t4qZOgv9ppks30G56LIHtqvca87lShlaslIOFCn74I+VFBltnyFhAc9h5xoGdSDNqPSsgX2cCV/gCnGETS97oR1MDkYMiS3kzhXFhBofu6tE7Y7d
CjgQe5gvuQ4c66Dpgpj11g1b84bvRGl5Qn+NAHcCctoY/WFNiixSDrh77ek210LoX2+RDjCQISDkKlI09ORqE/s3qAPE4rNn6hFoU3rUYbim6+DkTxhk9kNdiEYt/ia/z1IgzfNR4YwiHT1BI6AGg/VhGeuCW5+qEZbrakbBf+csfr4ZEhiR7L6nIO8jDKK/uzw39ygd5LVHY5I0wzJmwcDHrI8RPKrx6AW2Puz6EaFlCy3Xi9yfojW6Rt5FXs8pujQXSBB/iA+4N4dRZoFsbVzhOkjBoqBmi9lwGLu06T5uOEMvfrj7hkcD/A4IuEAWVrj3T5aL4BlKjn9K0pHYJ/DWz7eEXaNTIdAak1qgXtvK6lZohRIRIXzwHOQIcX2ME0hwl9o4HZm1hap6mhnJg2ZxNY2NlpF5prPPFUiTeyA+WXDRzuKEIF70ENSN3aMLaJYGoZfcZtzD71iqOgn+VWxiiPYzySy4SNBjDChpoa0cNISkirOUiLdodWw1+DB8XfkWCYPgEkFeH39VO/T/6OFJeI2z9ewOX/5Q68V9dFN6/kDciiDAduEJf+x6MbbA1BWPoVp1KuNgi6JcxdFZdXDs+974no+cXZibim3E3DrBXjZA9TIKplvB6/0fkZ+MFZEAuHYk65QyldcuW4zYZjHua7dQNRSuaTrVD1vH+xXoQ20kpAo07BwHLQ3F/OraCWG61EjH7kOKkTu38EGV3Tw+J5XtlFXT2C9E8A6eC2k+GvuOmbNrmjQYOBCDSA+wPsQ272UL59VaoycUbwyDZbtbXr4Yu9frjn24RzBqqeUfY8WaYJXmq2NxmjFlau6UlEfyDanjBR7F/OIVyzDHlyKNQ0qFXlnANZAiPHiLvcGdjhNqMdqlMwCaW/Yfs0tEKtNEaSC371RBSjCQuU6sf5jcGYEvfq0ZIdyJJHUIh5H0/sP1PJga482I9ZsLdb8hsPfTqkRgaUdWHcD0pozzmUngr9tQcrP1Eg/wOSI5lNSpXsmgjYXRz3xlnO8k49L82A++pkXPAVQfiIjKA6DIJfxMf08INrYkFCd504AAL+FTqxahYQlIkx9MIGbQdbeKVc5Z+I2iad/tfnkgTLTSAHATiKzQ/+D5d5OCaAdQenjjmeCWpb4L6hbHllxZCKfrvk5OBrn9e+WroJcG7xEn68/8p4F743/rPtrVg5lnkGpjJakyPHqv98t++X/BQlFsMy0NSqoTit+Z623X1Dg3gkhL7a10aF4PV5Gukjy4nGT+N17W4E0kK8kpnoC0yjQYOBCHCA+wPwJ1Okfe73ueLMAJzKSNWnOQsCIPmuiig7CLQ6ZSpB+f+YuUXxMDhyYhaWwO1IdVcA2sJnm78/yTxsZzKwZj6saIuwUM1MjRIjW0+N7QIuzLzgOFi2VlRTwa34kFCOov3K81HwORacZT2RJQ63DEmclWe30gTsXBXO4+CZv8iBAT2qFEn2GAEA6Cqb51X71lHUlj92J35feyOBb/hbt2A51FKVeR7Ob1d7gBfTrLmMG0Fbrm5sFo4abzJ1pk5WmDGTvKnXjQdIIp7B5kZuqYd0tOGH3B/H29OkdskcLFhk479hMlXog01qOTt9cEZXHRNhsY10RNmin4X5teAZofAnLpCuvUQ/7dgLfEm5DrM8Oz2rOZONXnLyuRYvXeVWCblzyy/Wtgdau1gbpE06g5f2jGBdF6P4tYEm2ikrWjiXmqeefsOgtYt0ZY+8sG/SPqhY51rRNvDZbXj2hXh6tb9TnrBZexz9aU0HAvOtfVFtCTAKzDioRNKTY+LOOn+jQYOBCKyA+wPsL2ujrX2dGycZl1Ww6f6T7nujYUAzTbifSe/Kn+G06wk+YFDGfjFAmI+z361/qQJMdfNxxIzu8KkfS4n9o5MZdr9LQOeNI+N1D4zBddwjN6iHUH6S/Z3pY0iZmdzc9N1j6jGk5BA1Ec3eTpG6Uul7DZMmPk4FY6EtrIXY/5p/wocvXKW7uGY84EFIFdGD2LM9VpBG7/3j3PG9t2HV1LX0yQ+6Ni25jGjltUVUYOqnIiajbWg53H4JToMk4bbDspPIn9ujLSQl9g846gABkdiTUEjiT5rqwUyux7Lmg8HjO7fLuV1Kt/JuC84eI+W+CDOgoFoEomgFj1TAb215gsAdmiYQ0sLmFHJfiZTdITSKl6bQn18RCvlomRAICuHC3zHJr2pfHEO4Flz356M8djkSkBBi/rVUWsprIDnRCWwjU2ZtFXtwATPx2rDlYw+6Dl8ttac+5/q/S3jzn1J7otzTg3rtwLxord/LGPrEPGOqT0r/ZY0ZFHbSoOl8hYKjQYOBCOiA+wPCh0FzRPu/G3YAzhX5NtLN1EPvPI+hP+dVsyYn6TXnmNi5TtUTR43PHxqksEHMXZkxDyxFePIXYwsa8gpobgFzu24Vh53zpz8CZ0q/YdNPIowf1Dnmp1aQaTDFNlUV/7+pXtAjas9nny3M5bGU589I/G+6zLBIT/h0jfMfoW2CwoZE0GyFe9ngEnoEz7t/5GDXwZXVDyRFo8IXSc8ol3cUQZIMALDqCrr8iLLcK8zBOJigXVkbZJDC25D1yLf7VKbGGgsvjqmDHxn/j3g+afDRMA0K1HoRoTIQrOjcv7w3w5zom6BSiRLkYqQhVOZNNl7A6gIpYlWVBPhjoQxZgK1LtGE3JO+4lZMwEM3mFjGMIJEIa1DFESJaQXO7UN/ovdgKDRsTamSHBehOPP8uJsRPze0o7mEEofsrNvkcij+7CexbTbgfiG3C3jmvNi/2orG2E10W4Az67vJ7LX1JKdbIhu7n0R2zRe5p/91P50ODrpONSmQk1Ce4QXKHOP+jQYOBCSSA+wPCprath4LyUHi28GXhCbZVV6+tBOFJGJT/vYikFeGCX5/oQfn6zsLIo7uWLYmoUPwy56qYAUlPNwYEqUJUKwrDHtX4AM4J28IIVzqBMME8SssRiam76gJQrdg6bbvGVTfJztZuFwRl8C8bMnDDngZxcmuvFM021J6oLNLOrnmArJmrlv0oEm1YhcCHWswUI95Q8yag6c8hhfDN9KdX+XC5cMJ6gNw9BCA2BMhOcQ0Y3hxZRt0JYh3DXhYGGNEdyQXaitDnRPIGcSCW3xzIvKHsIz6+m19dmymU5JRrECc6RGH4lMTuY9+dokZGKBWO+inPlPWw5WyEeVHJCdL+/qxTNMns+xwDwKCIAhWNDlNs3TAIQbPr+obRy9aMe2Ry6yfbdKMqWoQfdPRA19BGANvpRdPJgJ08ldz5H/8l5oNgTmsXQIzuCQPiHzqYVWfDznU8p+d34g5n9sA7yQxJr0r+COKCO8R1z0T0nKI+tCqW1KVhLm0ok5jC7HHLavyjQaOBCWCA+4N/fxNE0WW2c8ULBXMiz7ymtXi23KujT3leEQVHb2NHAE3xPHFFrUOfzstt+BYivh5bJ8AVEV8xe2Ck8dyAxy7g8gvy6K6gfvN/3pv2yeyEP1398i4plsfIETHcNqH1mTa4rXMrwX7S4umhBo9+U2Db0clQpg//0w9/o95GVRYEN5TvypwFr8veVbZeQ
8+ka4vs4+Sv2Rc/2ZYGYqyp7iDsRv+yOozUhQkl6PAnkpimhWJ2fUsShH1LLTVsanN6rlZ9Re121xNPVi7OIAAgRm9BtZSmu+1WrSH3dJfkVznCDQk4tqywz28639OhRiv98uFo1StKOGTG83WA60a8KCR7PMCP/NYPM3FmSia5pk76wqH5NJ8Z9Y82rqKgI7HFTn/RtLJ/s53vHNrIq43jMWAQFTgv0SArWhGIjyLF+EUWawPg46vYVtgQl28KI45Un4MuAROKMMi38BKhYhBeLGiqyz5uyrnO7p/NRrrTWlgkxB7Xinah6vnpyOo2YFludtQyVAKx4gOoZj2CIzAftzmOswRkeVMy44hh31MMMaNBeoEJnID7g3l6772nIV7HCcwbA6junN9kxO+xQtxTQO4Y2ABBoaa/cNzE29kFgrT/Q2nwnO9zaw21nT78QnzGqRvwmIGjnR3X+iBp7N11v/UIfFKHZMXkTy/vtGUWeNkj4HIt2wpDxZsTpByWcekKGprrYbEn7ACSypLBEsqBzFFEck/V8j4TbI+35UFf1e4etoEvPNWAwVIsqwpWCua92fr+EjTnhicbShVe3zWW7m+7iFZysMkw+GNmQ8MD4Av3O3npbj/BNQPft1vBgcq41zyNxNxJP+p9h9QIsrrAJAiyFuC9Zpn+QGzXTgotOUiw8Efwmsur+ON4WJphGp4wo5asKL+hRbnk7k+NoyJSP002cTisRWXBtjR/s4DUBFKMkgO11dICOHn8+sEqAS65bIeYSiwP/WZfZOsFGedvHrM1jMYQpb8mV/3xZ5xs+yUa0PcIIdA4pYsn+L0yLoM0C5Ljrcy8RR6t+gdyWAm0R+WN+i4mmE2waWcwyKNBdYEJ2ID7g3l1Pys+XJJ7Qg8stQqvm37FLd1bwX8ZnTiONnavmvtK6p4MMcGgBWGRjbIMVQB7AqZUMDPNC2JXnESUun7S1nxmxMLkjvufqvCTylLJw3l8vWY053lYqxDgumVK0mYn/TCSVbow8bMupVQ69VHADKnzBurIGEylvXe39T+pei1cRPbh9edXg6bx87ktbKUjQlU9PgGs/VnzzAxx1xxVFwF9+s0YizfE3MIKtM2COoC1da4ATzg/xwbHhjAvbA9KLoJSqN4mQmLeCxkaiBUD8Z1roHk13Wlga034x1hCKH38yH37jfB18sg3Z7I6x1yPIlIv7AD5SJUThkDVs9eTeyeCkDD2t6ozY0oKq5vkyg1qO2JQUbWQyz3xCpO+vPu9rNQSVHVg0hPa3wY+pY598P0DgKuVICyja1yU+1VmxcPfNjhIbZgg0JMzK6LWp/+JtvCQUrTIO6XJat29s5eg2o1quXPVKAbrcK4nbZ1XrRSjQYOBChSA+wNAO3IfDg7FDTcOEF31BpIAPbZeUYsOXbcsem19bJnCaGdPhYNZxSTo5JyVpqr7281j6AKDJEVwtWfR8Wk2fuvlDm7PFITJITuEsL5Fo0DFs2UGvErLbT8elzSroZxDX/72PVCxPoXwLlg5MRVqjIwcNGg3aW8iZf5OX1/Ml+3jRDgiOFH7FF8/d0tQi0XNqhkEp6mEx1KcvABMpev69oTqlLXsutUcWN5KWGn/1xD3xkD8X3HHb0kwWLqsx5ltZelFDjxBufUDX7b0gCkSOE+Es9sZkaHIuhYkiTKH3SEwlfGnkkgSteqF9NVY6c7JQTcXKxFDMtrVnSW8RFHs2BpkMSgE+XNJFewyVim7YvEliS6VWQHbn44ZfA88oa3GqD99+S9TMTlz5lHdJMNpv6ICLJTWbin9ygixUIXaWUORSQbRcaHjTNki+Vq86Wty8gjK/TSYCUHMDeWCECjmltx9AE1L3rhX0uwZ8+Hoy1zibxlIQkJqenfgOybh0GWjQZOBClCA+4N/f0RG1ixyAluQZm9K34TaPbenX4ZmegKfFKA1wiNq+USjDk6bEhxznEngwQgmnLzmjAJVKQCSCHhSnBIQjUtdfV9bSgyT04kk7bqLrq7Huqzms7DVZdgt1xNgLZPUpQMQolAJr7AYNi+v1R0fRhemMvi1YJunKYpmNZD4TJ+dTz0WXVga2qBChcK/GUfj7rSZgfArYCMyQoCWBy9X7k5wCUxfHOI+iAbWLurZNjqYw+ls2bvkEdPXc5us4BMHM7OkrXDZ9nLR9O4meBDWmYwek2hKMWv9eFXE4lORK9V4MveU0pZU1vxzKSb3sMyyy2qCHHVJSe3yfRjssT8S5vVSq3l+8L6MAGT/T78p1P0ExYOMNDIBfHt1kNAn1UhBBOAXMFdY88fI85j02ZqZ+kxG6u/iZSrDp00+WQWkiGbEonSPoDwtMu9IYE7vLvKto+aK+uiNfeTZjwx7EbvaVjArfai6uNIwVUxQkakHP50IX91U/dyd/25dWaHTqqi9FvMh0g4zwhbHcsxiE/kmo0G/gQqMgPuDkZGrBRmesCoC5IEOj8oHaszhHMn8ANzrTfMOsi5sy1o5c3B02eTeADOq3PqYsTCuGy7R/T7BP55sCDOJSKhB7+NTjGYH4YV6TdHWodoNCT1gBFtU9cNsHPsozIM7QJtrVokziMEkBSEbtFtEoGWtuHvS8xj8JZTBLXKRXlaQGsEJZ62ZhyasmpcCXCD3sZV7zCakrJgvXOVmw5jCvpRLMhe9kVNiB3wtVnK/djl4eyyYNS/Be4TsjzSIuQCVrcL2C7vhxTd0E9WxLRI49VhG4eexeKLvwYy4OPhJE+ekfiPwd7aMzPQklyGnfbSGDbyB6ZQgLIKtE/BJ1viQUpSM8daZZ1KnyTsPRDV0Y4lv2Beab2hxbhuwczzQHlAJbE35uYd2oKyj6DohLD63NLBaKMTHf+ITsDzDFr4vJEW9+ccVKI2IW7eGm+LCuinQBMq0p0zAnT4r9oNH8vFOwbh6xPX1vcrDu/qugKXcfZUaJV19b0L1eDkdDncHQpluyTPhR26yh5NbiEtnRFKKIG5PKcK0F+W5M/rKgyNdK3VFJdSy/X3Am13xGxTqghMeKdRKQo4ASlnrZKEXo0G1gQrIgPuDkY2v6CDaq5pOBwokW/A//y3h7A4SyBfMCra1pFqEBzIVFnFRfrt8u15z9fUEIDxXDPArLJazHhCIX2JAONjkUskQgfBNEGtvnfGGVJYedwO31oE9GFKrVAQmJ6Zrp/SgKW9dYL1rNm9D47N9xQUDZGm0d84pUcQm3llo+npX0nTmeOU241KgijOwj6dNu9ilTD0VFpSToSVigLGIW/ZA5w1HfkgJCdO2hpnuo5pGTqqUsuADZDOyDJ5WyFh9eqcuuQjMOfdwWSbf/rhSVv6FhjIsFu+2QgCoHRQIOGGYKe8V9VnHETk1uq59qU+D0VBsTgZLU3NIeKvbtYV+ESemWixZ2VcycoN+w4Nhvo2JHTnlxHrrrf080QpMNkhbmb6Sr+cFr8z8AuQpvDL12Co3wo/d90mFn9pLEkb6iSH7eMFujUPMUsbOWkyr4WZnPl+AhMP2DUiPBwvOPtMymqRBkQJV4/5XRxolfHP5Ug9R7XEeq5LAXGaGKn4N396L
RzDCm82hGJ8YU0d1nZHSjgnFNyqojYSCe1JYRWbx0Mi1bTCJ4VUp28UX/em+zuejPNmjQYaBCwSA+4N/frAhu7Se1Vk3EDYBgE6yYrQ9HjQSl0VpBO9+o3x9441pPQGyfSYZSOm2zadll1ivz/yUQPCaaqJBJux2AC74E/FCYDaD5ugqw9OXUQ0OhlziIBHPjL5OyXOk0Uy5qY+BGUphZi9yveQVihe+1IH/lLTgClOzTJNsI2QgPZCpWtZDgD/0ysO87mDVggB93rLElRncKWF/jXr7GkMhBwQCpkaJqJiIS03xUHXBYcm4vQCkGIoWpWUUDlo4hotQs0NRhQFMH8QzSDP+I00aG7gbk8TcoeHoUliyNsMhzmJ6w9WD7c8rdep0YqAQETY2KkOvyX/jUcZiGpFY2r9kaxdDOaj23Yj91+PuCPrgaBDNpbJkueydSa+duPl4fTxx0kNy7q2KDXm7pNVZPLso9JHrdnw4f9GI2Xo8Grykj1Ul9T633z/17Uf1A9LgkVtSVfCquIRUC5Yb1V4O4lmCCrTbIJLQYACW7VOAZSig6aEe8PCAIjTTM508ZCRLyXbq7NejQXqBC0CA+4OAfawq7rc+841xn7poaqevPFj1pLfxOGNchl9ciD+gGyzWxB/xsCID+nTVnIkD7D/GGN/rLQbZM8H/nBIAev7etcTF6lY9DHzLTbNcLhaCarJtahWNcISO0Q6H8KZ9tQMmWrsRuWKH97m+ktIn+W9VmjHOt0zTpl9vDEEghmDumuTbPih1BuT2XdpmdSFAkE0aL7c94+DmMh4ty1NCNe5U9XxGmAJE2X6SJ+/8RIwb0q2qi4cXGtErOJcs1iJIyzNY3sUfwwuhRgh5aoILHvb7op6CUSra6naxswSnyrUIf31ixyPM89TdWunmnNxCESOmc0dxr8YezmShtH9vMi4iK3xqp5loO8B3o1AJv1cmQXfmSFZSS6TlO9QNjm0IaTf4nkqKPIuJ7dJz00sBH5h9zUaMOwnWw7dBV7JtHFUtfuMt5ON3rWQDGOOOSNb4lbDRfb+NukSx6pQpe7Jhi1AZgldTrc0KiJN+e++k3oMeJi1TcOKjQWqBC3yA+4N6dDwMxHNO+IQBmpdfP0txPhVzIcgigTgvKU5Z5B1UsRKL9ntO264vpfXb5qIKZxsTL5gRb8cr9SYTeqZALPPTiPyGprMmcMYuymH6kpdtssAZlmkq4Fkqn0MpsUuusampz8fBVqvkLCLFskN9a3CSKYFYtq5xs2tOY4ZbQOZ233nmPWjtAdzO0TB8XKDJp0uSQqZL8Nxi8qDxe/jPJ3BsNvZ45GD/a1d586VN6+tJnhpq2kUi1JGbjYm748Pqn2jbshfpzO8yxTafKk8YdMTvOZUTJDspa9HGPxojq/Kre4L3WNpEFtsvcB9voh213Jgifb5VIHZtq4wfBeIRSdF2sI+CEpWGXpuLHN1oOiprwIrq5LpEHhHGxQIiDYSiIk/gYzGadEuyUOq8o2B1k5oULZE+dho02hlKtK0cCWv5QADlyz/QhXPZkiwqzgzCRJDGj/1y02xNbBb4ZFanAXgtm/l7fg==",
+}
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -12,6 +12,7 @@
import pandas as pd
import PIL
import pytest
+from scipy.io import wavfile
import gradio as gr
from gradio import media_data
@@ -1902,5 +1903,12 @@ def test_dataframe_postprocess_only_dates():
}
+def test_audio_preprocess_can_be_read_by_scipy():
+ x_wav = deepcopy(media_data.BASE64_MICROPHONE)
+ audio_input = gr.Audio(type="filepath")
+ output = audio_input.preprocess(x_wav)
+ wavfile.read(output)
+
+
if __name__ == "__main__":
unittest.main()
| Microphone file input might not be valid wav file
### Describe the bug
I have a script that uses `from scipy.io.wavfile import read` to read a `wav` file. Since I want users to be able to record themselves, I'm using `gr.inputs.Audio(source="microphone", type="filepath")`, but I get an error indicating that the file is not actually a valid `wav` file.
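A stripped-down sketch of the pattern (the function and its return value here are illustrative, not copied from the Colab):

```python
import gradio as gr
from scipy.io.wavfile import read


def inference(audio_path):
    # This is where it fails for microphone recordings: the temporary ".wav" file
    # handed to the function is not actually a RIFF/WAVE file.
    sampling_rate, data = read(audio_path)
    return f"{sampling_rate} Hz, {data.shape[0]} samples"


gr.Interface(
    fn=inference,
    inputs=gr.inputs.Audio(source="microphone", type="filepath"),
    outputs="text",
).launch()
```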
### Reproduction
Here is an end-to-end minimal example https://colab.research.google.com/drive/1fVoqhmjk07Oq6Omj8khC_3b3LGRemEvp?usp=sharing
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/gradio/routes.py", line 270, in predict
session_state,
File "/usr/local/lib/python3.7/dist-packages/starlette/concurrency.py", line 39, in run_in_threadpool
return await anyio.to_thread.run_sync(func, *args)
File "/usr/local/lib/python3.7/dist-packages/anyio/to_thread.py", line 29, in run_sync
limiter=limiter)
File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 818, in run_sync_in_worker_thread
return await future
File "/usr/local/lib/python3.7/dist-packages/anyio/_backends/_asyncio.py", line 754, in run
result = context.run(func, *args)
File "/usr/local/lib/python3.7/dist-packages/gradio/blocks.py", line 271, in process_api
predictions = block_fn.fn(*processed_input)
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 558, in <lambda>
if len(self.output_components) == 1
File "/usr/local/lib/python3.7/dist-packages/gradio/interface.py", line 706, in run_prediction
prediction = predict_fn(*processed_input)
File "<ipython-input-18-061a2456d566>", line 4, in inference
sampling_rate, data = read(audio_path)
File "/usr/local/lib/python3.7/dist-packages/scipy/io/wavfile.py", line 267, in read
file_size, is_big_endian = _read_riff_chunk(fid)
File "/usr/local/lib/python3.7/dist-packages/scipy/io/wavfile.py", line 168, in _read_riff_chunk
"understood.".format(repr(str1)))
ValueError: File format b'\x1aE\xdf\xa3'... not understood.
```
```
### System Info
```shell
Gradio `2.9.0b9`, colab and spaces
```
### Severity
critical
| I encountered a similar bug when using the [Speech Verification demo](https://huggingface.co/spaces/microsoft/unispeech-speaker-verification). It works when using prerecorded samples, but using microphone-recorded audio fails.
I downloaded the code and ran it locally; it gives an error at the line `wav1, _ = apply_effects_file(path1, EFFECTS)`:
```log
formats: can't open input file `/tmp/audiocyuip60n.wav': WAVE: RIFF header not found
Traceback (most recent call last):
...
RuntimeError: Error loading audio file: failed to open file /tmp/audiocyuip60n.wav
```
which suggests that the tmp `wav` file is not valid
Whole code is available at [Huggingface Spaces -> app.py](https://huggingface.co/spaces/microsoft/unispeech-speaker-verification/blob/main/app.py)
## Minimum example:
```python
import gradio as gr
from torchaudio.sox_effects import apply_effects_file
def fn(path1):
    apply_effects_file(path1, [])
    return ""

inputs = [
    gr.inputs.Audio(
        source="microphone", type="filepath", optional=True, label="Record"
    ),
]
output = gr.outputs.HTML(label="")
interface = gr.Interface(fn=fn, inputs=inputs, outputs=output)
interface.launch()
```
Package versions: `gradio==2.9.4`, `torchaudio==0.11.0`
I tried with older versions of `gradio` and the issue doesn't occur in `2.7.5.2` and older versions. The first version where it happens is `2.8.0`.
I suppose it might be due to some changes in [`class Audio(InputComponent):`](https://github.com/gradio-app/gradio/compare/v2.7.5...release-2.8.1#diff-6567432b8a2d4bd7c0ee6053e2173ff0b01da4b55ceddd4228ace41b164ae4caR1075) like adding `, suffix=".wav"`, but I didn't check that.
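One way to check that hypothesis locally is to look at the first bytes of the temp file instead of trusting its `.wav` suffix (a quick sketch, not something from this thread):

```python
def sniff_container(path):
    # A real WAV file starts with the RIFF magic; b'\x1aE\xdf\xa3' (the bytes shown in the
    # scipy traceback above) is the EBML magic used by WebM/Matroska containers.
    with open(path, "rb") as f:
        magic = f.read(4)
    if magic == b"RIFF":
        return "wav (RIFF)"
    if magic == b"\x1aE\xdf\xa3":
        return "webm/matroska (EBML)"
    return f"unknown container: {magic!r}"
```

Calling `sniff_container` on the path gradio passes to the prediction function shows whether the recording is real WAV data or just a renamed browser stream.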
I have checked the above code with older versions of `gradio`. The newest version that works without error is `2.7.5.2`, so `2.8.0` is the first version where the error ``formats: can't open input file `/tmp/audiocyuip60n.wav': WAVE: RIFF header not found`` occurs.
Is there someone looking into this?
Same bug when loading the recorded audio input.
```
RuntimeError: Error loading audio file: failed to open file /tmp/audio284knjiy.wav
```
package version: ```gradio==3.0.2```. | 2022-08-09T17:48:53 |
gradio-app/gradio | 1,994 | gradio-app__gradio-1994 | [
"1934"
] | 393c1f47d5537fb8c39c26fd4e117358b44a8126 | diff --git a/demo/kitchen_sink/run.py b/demo/kitchen_sink/run.py
--- a/demo/kitchen_sink/run.py
+++ b/demo/kitchen_sink/run.py
@@ -152,6 +152,7 @@ def fn(
* 3,
theme="default",
title="Kitchen Sink",
+ cache_examples=False,
description="Try out all the components!",
article="Learn more about [Gradio](http://gradio.app)",
)
diff --git a/scripts/upload_demo_to_space.py b/scripts/upload_demo_to_space.py
new file mode 100644
--- /dev/null
+++ b/scripts/upload_demo_to_space.py
@@ -0,0 +1,78 @@
+import argparse
+import pathlib
+import shutil
+import tempfile
+import textwrap
+from typing import Optional
+
+import huggingface_hub
+
+
+def upload_demo_to_space(
+ demo_name: str, space_id: str, hf_token: str, gradio_version: Optional[str]
+):
+ """Upload a demo in the demo directory to a huggingface space.
+ Args:
+ demo_name: The name of the demo to upload.
+ space_id: The id of the space to upload the demo to.
+ hf_token: HF api token. Need to have permission to write to space_id for this to work.
+ gradio_version: If not None, will set the gradio version in the created space to the given version.
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ demo_path = pathlib.Path(pathlib.Path().absolute(), f"demo/{demo_name}")
+ shutil.copytree(demo_path, tmpdir, dirs_exist_ok=True)
+ app_file = pathlib.Path(tmpdir, "run.py")
+ # Rename the app file to be app.py
+ app_file.rename(app_file.with_stem("app"))
+ if gradio_version:
+ readme = pathlib.Path(tmpdir, "README.md")
+ readme_content = f"""
+ ---
+ title: {space_id.split("/")[-1]}
+ emoji: 💩
+ colorFrom: indigo
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: {gradio_version}
+ app_file: app.py
+ pinned: false
+ ---
+ """
+ readme.open("w").write(textwrap.dedent(readme_content))
+
+ api = huggingface_hub.HfApi()
+ huggingface_hub.create_repo(
+ space_id,
+ space_sdk="gradio",
+ repo_type="space",
+ token=hf_token,
+ exist_ok=True,
+ )
+ api.upload_folder(
+ token=hf_token,
+ repo_id=space_id,
+ repo_type="space",
+ folder_path=tmpdir,
+ path_in_repo="",
+ )
+ return f"https://huggingface.co/spaces/{space_id}"
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Upload a demo to a space")
+ parser.add_argument("demo_name", type=str, help="Name of demo to upload")
+ parser.add_argument(
+ "space_id", type=str, help="Name of the space to upload the demo to"
+ )
+ parser.add_argument("hf_token", type=str, help="HF API token")
+ parser.add_argument(
+ "--gradio-version",
+ type=str,
+ help="If not None, will set the gradio version in the created space to the given version.",
+ )
+ args = parser.parse_args()
+ new_space = upload_demo_to_space(
+ args.demo_name, args.space_id, args.hf_token, args.gradio_version
+ )
+ print(new_space)
| Add testing to make sure share=True, iframe, and Spaces don't break on release
| What we need is a GitHub action that runs *after* the pypi package is deployed and programmatically tests:
* That `share=True` is working with the latest release (see the sketch after this list)
* That demos appear properly in iframes with the latest release
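For the `share=True` bullet, a rough sketch of what such a post-release check could run (a hypothetical script, not an existing workflow; it assumes the freshly released package is already installed in the CI job):

```python
import urllib.request

import gradio as gr


def check_share_link_works():
    demo = gr.Interface(lambda x: x, "textbox", "textbox")
    try:
        # launch() returns (app, local_url, share_url); share_url is None if tunneling failed
        _, _, share_url = demo.launch(share=True, prevent_thread_lock=True)
        assert share_url is not None, "no share link was created"
        with urllib.request.urlopen(share_url) as resp:
            assert resp.status == 200, f"share link returned {resp.status}"
    finally:
        demo.close()


if __name__ == "__main__":
    check_share_link_works()
```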
I think we should do a pre-release prior to the official release to make sure we don't publish a broken version.
Ah yeah that’s much better!
| 2022-08-10T20:15:10 |
|
gradio-app/gradio | 2,003 | gradio-app__gradio-2003 | [
"1972"
] | 8e24d5d64639d8b908ffbdced07af71811205593 | diff --git a/demo/white_noise_vid_not_playable/run.py b/demo/white_noise_vid_not_playable/run.py
new file mode 100644
--- /dev/null
+++ b/demo/white_noise_vid_not_playable/run.py
@@ -0,0 +1,22 @@
+import cv2
+import gradio as gr
+import numpy as np
+
+
+def gif_maker():
+ img_array = []
+ height, width = 50, 50
+ for i in range(30):
+ img_array.append(np.random.randint(0, 255, size=(height, width, 3)).astype(np.uint8))
+ output_file = "test.mp4"
+ out = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), 15, (height, width))
+ for i in range(len(img_array)):
+ out.write(img_array[i])
+ out.release()
+ return output_file, output_file
+
+
+demo = gr.Interface(gif_maker, inputs=None, outputs=[gr.Video(), gr.File()])
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1779,7 +1779,11 @@ def stream(
@document("change", "clear", "play", "pause", "stop", "style")
class Video(Changeable, Clearable, Playable, IOComponent):
"""
- Creates an video component that can be used to upload/record videos (as an input) or display videos (as an output).
+ Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output).
+ For the video to be playable in the browser it must have a compatible container and codec combination. Allowed
+ combinations are .mp4 with h264 codec, .ogg with theora codec, and .webm with vp9 codec. If the component detects
+ that the output video would not be playable in the browser it will attempt to convert it to a playable mp4 video.
+ If the conversion fails, the original video is returned.
Preprocessing: passes the uploaded video as a {str} filepath whose extension can be set by `format`.
Postprocessing: expects a {str} filepath to a video which is displayed.
Examples-format: a {str} filepath to a local file that contains the video.
@@ -1919,7 +1923,7 @@ def restore_flagged(self, dir, data, encryption_key):
def generate_sample(self):
return deepcopy(media_data.BASE64_VIDEO)
- def postprocess(self, y: str) -> str:
+ def postprocess(self, y: str) -> Optional[Dict[str, str]]:
"""
Parameters:
y: path to video
@@ -1929,6 +1933,14 @@ def postprocess(self, y: str) -> str:
if y is None:
return None
returned_format = y.split(".")[-1].lower()
+ if (
+ processing_utils.ffmpeg_installed()
+ and not processing_utils.video_is_playable(y)
+ ):
+ warnings.warn(
+ "Video does not have browser-compatible container or codec. Converting to mp4"
+ )
+ y = processing_utils.convert_video_to_playable_mp4(y)
if self.format is not None and returned_format != self.format:
output_file_name = y[0 : y.rindex(".") + 1] + self.format
ff = FFmpeg(inputs={y: None}, outputs={output_file_name: None})
diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -1,13 +1,17 @@
import base64
+import json
import mimetypes
import os
+import pathlib
import shutil
+import subprocess
import tempfile
import warnings
from io import BytesIO
import numpy as np
import requests
+from ffmpy import FFmpeg, FFprobe, FFRuntimeError
from PIL import Image, ImageOps
from gradio import encryptor
@@ -518,3 +522,53 @@ def _scale(a, n, m, copy=True):
def strip_invalid_filename_characters(filename: str) -> str:
return "".join([char for char in filename if char.isalnum() or char in "._- "])
+
+
+def ffmpeg_installed() -> bool:
+ return shutil.which("ffmpeg") is not None
+
+
+def video_is_playable(video_filepath: str) -> bool:
+ """Determines if a video is playable in the browser.
+
+ A video is playable if it has a playable container and codec.
+ .mp4 -> h264
+ .webm -> vp9
+ .ogg -> theora
+ """
+ try:
+ container = pathlib.Path(video_filepath).suffix.lower()
+ probe = FFprobe(
+ global_options="-show_format -show_streams -select_streams v -print_format json",
+ inputs={video_filepath: None},
+ )
+ output = probe.run(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+ output = json.loads(output[0])
+ video_codec = output["streams"][0]["codec_name"]
+ return (container, video_codec) in [
+ (".mp4", "h264"),
+ (".ogg", "theora"),
+ (".webm", "vp9"),
+ ]
+ # If anything goes wrong, assume the video can be played to not convert downstream
+ except FFRuntimeError:
+ return True
+
+
+def convert_video_to_playable_mp4(video_path: str) -> str:
+ """Convert the video to mp4. If something goes wrong return the original video."""
+ try:
+ output_path = pathlib.Path(video_path).with_suffix(".mp4")
+ with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+ shutil.copy2(video_path, tmp_file.name)
+ # ffmpeg will automatically use h264 codec (playable in browser) when converting to mp4
+ ff = FFmpeg(
+ inputs={str(tmp_file.name): None},
+ outputs={str(output_path): None},
+ global_options="-y -loglevel quiet",
+ )
+ ff.run()
+ except FFRuntimeError as e:
+ print(f"Error converting video to browser-playable format {str(e)}")
+ output_path = video_path
+ return str(output_path)
| diff --git a/test/conftest.py b/test/conftest.py
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,4 +1,14 @@
+import pathlib
+
+import pytest
+
+
def pytest_configure(config):
config.addinivalue_line(
"markers", "flaky: mark test as flaky. Failure will not cause te"
)
+
+
[email protected]
+def test_file_dir():
+ return pathlib.Path(pathlib.Path(__file__).parent, "test_files")
diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1,6 +1,7 @@
import json
import os
import pathlib
+import shutil
import tempfile
import unittest
from copy import deepcopy
@@ -15,7 +16,7 @@
from scipy.io import wavfile
import gradio as gr
-from gradio import media_data
+from gradio import media_data, processing_utils
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -1914,5 +1915,32 @@ def test_audio_preprocess_can_be_read_by_scipy():
wavfile.read(output)
+def test_video_postprocess_converts_to_playable_format(test_file_dir):
+ # This file has a playable container but not playable codec
+ with tempfile.NamedTemporaryFile(suffix="bad_video.mp4") as tmp_not_playable_vid:
+ bad_vid = str(test_file_dir / "bad_video_sample.mp4")
+ assert not processing_utils.video_is_playable(bad_vid)
+ shutil.copy(bad_vid, tmp_not_playable_vid.name)
+ _ = gr.Video().postprocess(tmp_not_playable_vid.name)
+ # The original video gets converted to .mp4 format
+ full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix(
+ ".mp4"
+ )
+ assert processing_utils.video_is_playable(str(full_path_to_output))
+
+ # This file has a playable codec but not a playable container
+ with tempfile.NamedTemporaryFile(
+ suffix="playable_but_bad_container.mkv"
+ ) as tmp_not_playable_vid:
+ bad_vid = str(test_file_dir / "playable_but_bad_container.mkv")
+ assert not processing_utils.video_is_playable(bad_vid)
+ shutil.copy(bad_vid, tmp_not_playable_vid.name)
+ _ = gr.Video().postprocess(tmp_not_playable_vid.name)
+ full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix(
+ ".mp4"
+ )
+ assert processing_utils.video_is_playable(str(full_path_to_output))
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/test_files/bad_video_sample.mp4 b/test/test_files/bad_video_sample.mp4
new file mode 100644
Binary files /dev/null and b/test/test_files/bad_video_sample.mp4 differ
diff --git a/test/test_files/playable_but_bad_container.mkv b/test/test_files/playable_but_bad_container.mkv
new file mode 100644
Binary files /dev/null and b/test/test_files/playable_but_bad_container.mkv differ
diff --git a/test/test_files/video_sample.ogg b/test/test_files/video_sample.ogg
new file mode 100644
Binary files /dev/null and b/test/test_files/video_sample.ogg differ
diff --git a/test/test_files/video_sample.webm b/test/test_files/video_sample.webm
new file mode 100644
Binary files /dev/null and b/test/test_files/video_sample.webm differ
diff --git a/test/test_processing_utils.py b/test/test_processing_utils.py
--- a/test/test_processing_utils.py
+++ b/test/test_processing_utils.py
@@ -1,8 +1,12 @@
import os
+import pathlib
+import shutil
import tempfile
import unittest
from copy import deepcopy
+from unittest.mock import patch
+import ffmpy
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
@@ -131,5 +135,48 @@ def test_subclass_conversion(self):
assert y.dtype == x.dtype
+def test_video_has_playable_codecs(test_file_dir):
+ assert gr.processing_utils.video_is_playable(
+ str(test_file_dir / "video_sample.mp4")
+ )
+ assert gr.processing_utils.video_is_playable(
+ str(test_file_dir / "video_sample.ogg")
+ )
+ assert gr.processing_utils.video_is_playable(
+ str(test_file_dir / "video_sample.webm")
+ )
+ assert not gr.processing_utils.video_is_playable(
+ str(test_file_dir / "bad_video_sample.mp4")
+ )
+
+
+def test_convert_video_to_playable_mp4(test_file_dir):
+ with tempfile.NamedTemporaryFile(suffix="out.avi") as tmp_not_playable_vid:
+ shutil.copy(
+ str(test_file_dir / "bad_video_sample.mp4"), tmp_not_playable_vid.name
+ )
+ playable_vid = gr.processing_utils.convert_video_to_playable_mp4(
+ tmp_not_playable_vid.name
+ )
+ assert gr.processing_utils.video_is_playable(playable_vid)
+
+
+def raise_ffmpy_runtime_exception():
+ raise ffmpy.FFRuntimeError("", "", "", "")
+
+
+@patch("ffmpy.FFmpeg.run", side_effect=raise_ffmpy_runtime_exception)
+def test_video_conversion_returns_original_video_if_fails(mock_run, test_file_dir):
+ with tempfile.NamedTemporaryFile(suffix="out.avi") as tmp_not_playable_vid:
+ shutil.copy(
+ str(test_file_dir / "bad_video_sample.mp4"), tmp_not_playable_vid.name
+ )
+ playable_vid = gr.processing_utils.convert_video_to_playable_mp4(
+ tmp_not_playable_vid.name
+ )
+ # If the conversion succeeded it'd be .mp4
+ assert pathlib.Path(playable_vid).suffix == ".avi"
+
+
if __name__ == "__main__":
unittest.main()
| Issues when loading/outputting video
### Describe the bug
I'm running into a weird situation in which calling an inference function directly with a given video works well (I can download and play it), but wrapping the same function with a `gr.Interface` outputs an invalid video: even if I download it, I cannot play it, and the demo shows an error. I saw @nateraw also had to do some hacks to make it work in a previous demo https://huggingface.co/spaces/nateraw/yolov6/blob/main/app.py#L33.
Adding this before returning makes it work
```python
out_file = tempfile.NamedTemporaryFile(suffix="out.mp4", delete=False)
subprocess.run(f"ffmpeg -y -loglevel quiet -stats -i {output_fname} -c:v libx264 {out_file.name}".split())
return out_file.name
```
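For reference, a small diagnostic sketch along the lines of the probe added in the patch above (browsers generally need .mp4/h264, .webm/vp9, or .ogg/theora):
```python
import json
import pathlib
import subprocess

from ffmpy import FFprobe

def container_and_codec(video_path):
    # ask ffprobe for the first video stream's codec, plus the container suffix
    probe = FFprobe(
        global_options="-show_format -show_streams -select_streams v -print_format json",
        inputs={video_path: None},
    )
    stdout, _ = probe.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    codec = json.loads(stdout)["streams"][0]["codec_name"]
    return pathlib.Path(video_path).suffix, codec
```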
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://huggingface.co/spaces/nateraw/yolov6/blob/main/app.py#L58 without those two lines
### Screenshot
_No response_
### Logs
```shell
None
```
### System Info
```shell
Gradio 3.1.0 and above
```
### Severity
serious, but I can work around it
| 2022-08-11T21:37:39 |
|
gradio-app/gradio | 2,046 | gradio-app__gradio-2046 | [
"2045"
] | 029637cef96c69672bc47ffcc5f091b58b68dd2b | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -151,7 +151,7 @@ def set_event_trigger(
"status_tracker": status_tracker._id
if status_tracker is not None
else None,
- "queue": queue,
+ "queue": False if fn is None else queue,
"api_name": api_name,
"scroll_to_output": scroll_to_output,
"show_progress": show_progress,
| Interface Clear button is broken on spaces
### Describe the bug
The `_js` parameter does not seem to work on Spaces. This means the Clear button doesn't work either.
This is in gradio 3.1.5. I think it is a recent regression, because it was not the case as of this Monday in the demos deployed for #2012.
This also doesn't happen locally.
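For context, a rough sketch of the `_js` pattern the report refers to (hypothetical layout; this is roughly how Interface wires its Clear button, with `fn=None` and a client-side snippet):
```python
import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox()
    clear = gr.Button("Clear")
    # no Python fn; the JS return value is written straight into the outputs
    clear.click(None, [], [box], _js="() => ['']")

demo.launch()
```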
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
### Clear button working on Monday (PR #2012)

### Clear button broken 3.1.5 (PR #2033)

### Screenshot
Logs - noticed we log a lot of things to the console?
<img width="679" alt="image" src="https://user-images.githubusercontent.com/41651716/185654861-38131b2f-740e-46da-9e85-37e78782903f.png">
### Logs
```shell
-
```
### System Info
```shell
Spaces
```
### Severity
blocking upgrade to latest gradio version
| 2022-08-19T15:50:46 |
||
gradio-app/gradio | 2,073 | gradio-app__gradio-2073 | [
"1689"
] | 4f0b8f9829e10f0fce44d8c66cb13e17a5e48100 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2006,7 +2006,7 @@ def __init__(
Parameters:
value: Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component.
file_count: if single, allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory".
- type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "binary" returns an bytes object.
+ type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name and original filename can be retrieved with file_obj.orig_name, "binary" returns an bytes object.
label: component name in interface.
show_label: if True, will display label.
interactive: if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.
@@ -2071,11 +2071,14 @@ def process_single_file(f):
)
if self.type == "file":
if is_file:
- return processing_utils.create_tmp_copy_of_file(file_name)
+ file = processing_utils.create_tmp_copy_of_file(file_name)
+ file.orig_name = file_name
else:
- return processing_utils.decode_base64_to_file(
+ file = processing_utils.decode_base64_to_file(
data, file_path=file_name
)
+ file.orig_name = file_name
+ return file
elif self.type == "bytes":
if is_file:
with open(file_name, "rb") as file_data:
@@ -2114,6 +2117,7 @@ def postprocess(self, y: str) -> Dict:
if isinstance(y, list):
return [
{
+ "orig_name": os.path.basename(file),
"name": processing_utils.create_tmp_copy_of_file(
file, self.temp_dir
).name,
@@ -2125,6 +2129,7 @@ def postprocess(self, y: str) -> Dict:
]
else:
return {
+ "orig_name": os.path.basename(y),
"name": processing_utils.create_tmp_copy_of_file(
y, dir=self.temp_dir
).name,
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -854,6 +854,7 @@ def test_component_functions(self):
file_input.serialize("test/test_files/sample_file.pdf")["name"],
"test/test_files/sample_file.pdf",
)
+ assert output.orig_name == "test/test_files/sample_file.pdf"
self.assertIsInstance(file_input.generate_sample(), dict)
file_input = gr.File(label="Upload Your File")
| Download button does not respect the filepath returned by the function
### Describe the bug
In the `zip_two_files` demo:
```python
import os
from zipfile import ZipFile
import gradio as gr

def zip_two_files(file1, file2):
    with ZipFile("tmp.zip", "w") as zipObj:
        zipObj.write(file1.name, "file1")
        zipObj.write(file2.name, "file2")
    return "tmp.zip"

demo = gr.Interface(
    zip_two_files,
    ["file", "file"],
    "file",
    examples=[
        [os.path.join(os.path.dirname(__file__),"files/titanic.csv"),
         os.path.join(os.path.dirname(__file__),"files/titanic.csv")],
    ],
)

if __name__ == "__main__":
    demo.launch()
```
When you download the file, it is renamed `download.zip` by default on Chrome.
### Download version 3.0.22

That's not the case on version 3.0.12. The file is called `tmp` because that's what the prediction function returns. You can see the same behavior on this space: https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj Was this change intentional?
### Download version 3.0.12

### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Run the `zip_two_files` demo on 3.0.22 and download the file.
### Screenshot
_No response_
### Logs
```shell
-
```
### System Info
```shell
3.0.22
```
### Severity
annoying
| Thank you for addressing the bug @freddyaboulton. This came up because my output is a mesh file (file.msh), and when I try to download the file it loses the .msh extension.
Thank you for bringing it to our attention!
Any updates on this?
@irfan-mekic Thanks for the follow-up!
We were working on improving the docs the last two weeks. Next we're working on the 3.2 milestone which will focus on bug fixes. I've added this issue to that milestone so someone will take a look at this soon.
Reopening as this seems to have come back as a result of #1967 | 2022-08-24T05:47:44 |
gradio-app/gradio | 2,145 | gradio-app__gradio-2145 | [
"2140"
] | eb81fa2cf2a17e4660846978a8f4d594072e5db8 | diff --git a/demo/model3D/run.py b/demo/model3D/run.py
--- a/demo/model3D/run.py
+++ b/demo/model3D/run.py
@@ -1,4 +1,3 @@
-import time
import gradio as gr
import os
diff --git a/gradio/serializing.py b/gradio/serializing.py
--- a/gradio/serializing.py
+++ b/gradio/serializing.py
@@ -2,6 +2,7 @@
import os
from abc import ABC, abstractmethod
+from pathlib import Path
from typing import Any, Dict
from gradio import processing_utils
@@ -110,6 +111,7 @@ def serialize(
"data": processing_utils.encode_url_or_file_to_base64(
filename, encryption_key=encryption_key
),
+ "orig_name": Path(filename).name,
"is_file": False,
}
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -850,10 +850,12 @@ def test_component_functions(self):
file_input = gr.File()
output = file_input.preprocess(x_file)
self.assertIsInstance(output, tempfile._TemporaryFileWrapper)
+ serialized = file_input.serialize("test/test_files/sample_file.pdf")
assert filecmp.cmp(
- file_input.serialize("test/test_files/sample_file.pdf")["name"],
+ serialized["name"],
"test/test_files/sample_file.pdf",
)
+ assert serialized["orig_name"] == "sample_file.pdf"
assert output.orig_name == "test/test_files/sample_file.pdf"
self.assertIsInstance(file_input.generate_sample(), dict)
| When caching the output of a file component in examples, the filename is not respected
### Describe the bug
Run Model3D with cache examples - you can do it with the version hosted on spaces here: https://huggingface.co/spaces/gradio-pr-deploys/pr-2128-all-demos
Click on an example and download the resulting file; you will see that the downloaded file does not have a proper name, e.g. `_home_user_app_gradio_cached_e (2)`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Stated above^
### Screenshot

### Logs
```shell
-
```
### System Info
```shell
-
```
### Severity
serious, but I can work around it
| Out of curiosity, can you try right clicking "open link in a new tab" when you click on the "Download" link?
Interesting - then the file is just called `Download`. Just tried it on the space
At first I thought it had to do with the name of the file being too long - but I think it's caused by reading from the example cache. Locally, i.e. with no example caching, I modified `zip_two_files` to return a really long file name and the file name (and extension) was respected.
Getting the same behavior, very strange
Think I figured it out - it's the slashes in the file name when reading from the cache. I think we just need to make the file serializer add the `orig_name` field with the file name as opposed to the full path. Will put up a PR soon. | 2022-08-31T18:44:23 |
gradio-app/gradio | 2,151 | gradio-app__gradio-2151 | [
"2135"
] | 4e1ee4099f5043c11e3d3f2c31c7eb3e81e81506 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -146,6 +146,15 @@ def set_event_trigger(
if not isinstance(outputs, list):
outputs = [outputs]
Context.root_block.fns.append(BlockFunction(fn, preprocess, postprocess))
+ if api_name is not None:
+ api_name_ = utils.append_unique_suffix(
+ api_name, [dep["api_name"] for dep in Context.root_block.dependencies]
+ )
+ if not (api_name == api_name_):
+ warnings.warn(
+ "api_name {} already exists, using {}".format(api_name, api_name_)
+ )
+ api_name = api_name_
dependency = {
"targets": [self._id] if not no_target else [],
"trigger": event_name,
@@ -572,7 +581,21 @@ def render(self):
if Context.root_block is not None:
Context.root_block.blocks.update(self.blocks)
Context.root_block.fns.extend(self.fns)
- Context.root_block.dependencies.extend(self.dependencies)
+ for dependency in self.dependencies:
+ api_name = dependency["api_name"]
+ if api_name is not None:
+ api_name_ = utils.append_unique_suffix(
+ api_name,
+ [dep["api_name"] for dep in Context.root_block.dependencies],
+ )
+ if not (api_name == api_name_):
+ warnings.warn(
+ "api_name {} already exists, using {}".format(
+ api_name, api_name_
+ )
+ )
+ dependency["api_name"] = api_name_
+ Context.root_block.dependencies.append(dependency)
Context.root_block.temp_dirs = Context.root_block.temp_dirs | self.temp_dirs
if Context.block is not None:
Context.block.children.extend(self.children)
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -68,7 +68,7 @@ class QueuePushBody(BaseModel):
class PredictBody(BaseModel):
session_hash: Optional[str]
data: Any
- fn_index: int = 0
+ fn_index: Optional[int]
###########
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -656,3 +656,17 @@ def sanitize_list_for_csv(
sanitized_value = sanitize_value_for_csv(value)
sanitized_values.append(sanitized_value)
return sanitized_values
+
+
+def append_unique_suffix(name: str, list_of_names: List[str]):
+ """Appends a numerical suffix to `name` so that it does not appear in `list_of_names`."""
+ list_of_names = set(list_of_names) # for O(1) lookup
+ if name not in list_of_names:
+ return name
+ else:
+ suffix_counter = 1
+ new_name = name + f"_{suffix_counter}"
+ while new_name in list_of_names:
+ suffix_counter += 1
+ new_name = name + f"_{suffix_counter}"
+ return new_name
| diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -7,7 +7,7 @@
from fastapi import FastAPI
from fastapi.testclient import TestClient
-from gradio import Interface, close_all, routes
+from gradio import Blocks, Interface, Textbox, close_all, routes
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -45,6 +45,69 @@ def test_predict_route(self):
output = dict(response.json())
self.assertEqual(output["data"], ["testtest"])
+ def test_named_predict_route(self):
+ with Blocks() as demo:
+ i = Textbox()
+ o = Textbox()
+ i.change(lambda x: x + "1", i, o, api_name="p")
+ i.change(lambda x: x + "2", i, o, api_name="q")
+
+ app, _, _ = demo.launch(prevent_thread_lock=True)
+ client = TestClient(app)
+ response = client.post("/api/p/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test1"]
+
+ response = client.post("/api/q/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test2"]
+
+ def test_same_named_predict_route(self):
+ with Blocks() as demo:
+ i = Textbox()
+ o = Textbox()
+ i.change(lambda x: x + "0", i, o, api_name="p")
+ i.change(lambda x: x + "1", i, o, api_name="p")
+
+ app, _, _ = demo.launch(prevent_thread_lock=True)
+ client = TestClient(app)
+ response = client.post("/api/p/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test0"]
+
+ response = client.post("/api/p_1/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test1"]
+
+ def test_multiple_renamed(self):
+ with Blocks() as demo:
+ i = Textbox()
+ o = Textbox()
+ i.change(lambda x: x + "0", i, o, api_name="p")
+ i.change(lambda x: x + "1", i, o, api_name="p")
+ i.change(lambda x: x + "2", i, o, api_name="p_1")
+
+ app, _, _ = demo.launch(prevent_thread_lock=True)
+ client = TestClient(app)
+ response = client.post("/api/p/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test0"]
+
+ response = client.post("/api/p_1/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test1"]
+
+ response = client.post("/api/p_1_1/", json={"data": ["test"]})
+ assert response.status_code == 200
+ output = dict(response.json())
+ assert output["data"] == ["test2"]
+
def test_predict_route_without_fn_index(self):
response = self.client.post("/api/predict/", json={"data": ["test"]})
self.assertEqual(response.status_code, 200)
diff --git a/test/test_utils.py b/test/test_utils.py
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -20,6 +20,7 @@
)
from gradio.utils import (
Request,
+ append_unique_suffix,
assert_configs_are_equivalent_besides_ids,
colab_check,
delete_none,
@@ -492,5 +493,22 @@ def test_list(self):
assert sanitize_list_for_csv([1, ["ab", "=de"]]) == [1, ["ab", "'=de"]]
+class TestAppendUniqueSuffix:
+ def test_no_suffix(self):
+ name = "test"
+ list_of_names = ["test_1", "test_2"]
+ assert append_unique_suffix(name, list_of_names) == name
+
+ def test_first_suffix(self):
+ name = "test"
+ list_of_names = ["test", "test_-1"]
+ assert append_unique_suffix(name, list_of_names) == "test_1"
+
+ def test_later_suffix(self):
+ name = "test"
+ list_of_names = ["test", "test_1", "test_2", "test_3"]
+ assert append_unique_suffix(name, list_of_names) == "test_4"
+
+
if __name__ == "__main__":
unittest.main()
| API docs confusing
### Describe the bug
When using `TabbedInterface`, the `fn_index` field should be included in the **Input Payload** to specify which tab's function to call.
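For reference, event listeners can also be given an `api_name` (the patch above makes duplicate names unique), which avoids hard-coding `fn_index`; a sketch, assuming the default port:
```python
import requests
import gradio as gr

with gr.Blocks() as demo:
    a = gr.Number()
    b = gr.Number()
    out = gr.Number()
    add_btn = gr.Button("add")
    add_btn.click(lambda x, y: x + y, [a, b], out, api_name="add")

demo.launch(prevent_thread_lock=True)
resp = requests.post("http://127.0.0.1:7860/api/add/", json={"data": [2, 1]})
print(resp.json()["data"])  # [3.0]
```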
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
from operator import add, sub

import gradio as gr
gr.TabbedInterface(
[
gr.Interface(add, ["number", "number"], "number"),
gr.Interface(sub, ["number", "number"], "number"),
]
).launch()
```
```python
import requests
resp = requests.post("http://127.0.0.1:7860/api/predict", json={"data": [2, 1]})
resp = requests.post("http://127.0.0.1:7860/api/predict", json={"data": [2, 1], "fn_index": 3})
```
### Screenshot

### Logs
```shell
No response
```
### System Info
```shell
gradio==3.1.8b3
```
### Severity
annoying
| 2022-09-01T01:50:23 |
|
gradio-app/gradio | 2,226 | gradio-app__gradio-2226 | [
"1662"
] | 6ee7efa86cd350b588a3589c8390ab9a7d2f10e5 | diff --git a/setup.py b/setup.py
deleted file mode 100644
--- a/setup.py
+++ /dev/null
@@ -1,39 +0,0 @@
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
-from pathlib import Path
-import re
-
-this_directory = Path(__file__).parent
-
-long_description = (this_directory / "README.md").read_text(encoding='utf8')
-# Replace relative paths to images with absolute paths
-long_description = re.sub("website/homepage/", "https://raw.githubusercontent.com/gradio-app/gradio/main/website/homepage/", long_description)
-long_description = re.sub(r"demo/([\S]*.gif)", r"https://raw.githubusercontent.com/gradio-app/gradio/main/demo/\g<1>", long_description)
-
-version = (this_directory / "gradio" / "version.txt").read_text(
- encoding='utf8').strip()
-
-with open("requirements.txt") as reqs:
- requirements = reqs.readlines()
-
-setup(
- name="gradio",
- version=version,
- include_package_data=True,
- description="Python library for easily interacting with trained machine learning models",
- long_description=long_description,
- long_description_content_type='text/markdown',
- author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir",
- author_email="[email protected]",
- url="https://github.com/gradio-app/gradio",
- packages=["gradio", "gradio.test_data", "test.test_files"],
- license="Apache License 2.0",
- keywords=["machine learning", "visualization", "reproducibility"],
- install_requires=requirements,
- entry_points={
- 'console_scripts': ['gradio=gradio.reload:run_in_reload_mode']
- },
- python_requires='>=3.7',
-)
| Use static config file to configure setuptools as opposed to setup.py
- [x] I have searched to see if a similar issue already exists.
As a gradio developer, I think we should adopt the best practice put forth in [PEP 621](https://peps.python.org/pep-0621/) and declare project metadata statically in a pyproject.toml (setup.cfg would be ok too).
We should still keep setup.py for editable installs.
**Describe the solution you'd like**
Use pyproject.toml (or setup.cfg) to define project metadata
**Additional context**
[PEP 621](https://peps.python.org/pep-0621/)
| Don't think this is high priority - configuring the build via `setup` in `setup.py` will continue to work for a while, but it may be best to get ahead of this and adopt the best practice. From the setuptools [docs](https://setuptools.pypa.io/en/latest/userguide/quickstart.html#setup-py):

| 2022-09-10T17:16:43 |
|
gradio-app/gradio | 2,242 | gradio-app__gradio-2242 | [
"2191"
] | 6ee7efa86cd350b588a3589c8390ab9a7d2f10e5 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -671,7 +671,7 @@ def postprocess_data(self, fn_index, predictions, state):
)
output_index = dependency["outputs"].index(component._id)
reordered_predictions[output_index] = value
- predictions = reordered_predictions
+ predictions = utils.resolve_singleton(reordered_predictions)
elif any(keys_are_blocks):
raise ValueError(
"Returned dictionary included some keys as Components. Either all keys must be Components to assign Component values, or return a List of values to assign output values in order."
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -279,6 +279,25 @@ def test_blocks_does_not_replace_keyword_literal():
assert output[0]["value"] == "NO_VALUE"
+def test_blocks_returns_correct_output_dict_single_key():
+
+ with gr.Blocks() as demo:
+ num = gr.Number()
+ num2 = gr.Number()
+ update = gr.Button(value="update")
+
+ def update_values():
+ return {num2: gr.Number.update(value=42)}
+
+ update.click(update_values, inputs=[num], outputs=[num2])
+
+ output = demo.postprocess_data(0, {num2: gr.Number.update(value=42)}, state=None)
+ assert output[0]["value"] == 42
+
+ output = demo.postprocess_data(0, {num2: 23}, state=None)
+ assert output[0] == 23
+
+
class TestCallFunction:
@pytest.mark.asyncio
async def test_call_regular_function(self):
| Returning a dict from a function doesn't work as expected when the dict only has one key
### Describe the bug
When you return a dictionary from a function, the component may not be updated as expected if the dictionary has one key.
This is because the dict gets converted into a list, which is then wrapped in a tuple, so the single output component receives a one-element list instead of the bare value.
The following code handles dict outputs and you can see that it converts the predictions into a list
```python
if type(predictions) is dict and len(predictions) > 0:
keys_are_blocks = [isinstance(key, Block) for key in predictions.keys()]
if all(keys_are_blocks):
reordered_predictions = [skip() for _ in dependency["outputs"]]
for component, value in predictions.items():
if component._id not in dependency["outputs"]:
return ValueError(
f"Returned component {component} not specified as output of function."
)
output_index = dependency["outputs"].index(component._id)
reordered_predictions[output_index] = value
predictions = reordered_predictions
```
Then we convert that to a tuple:
```python
if len(dependency["outputs"]) == 1:
predictions = (predictions,)
```
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
import random
import string
with gr.Blocks() as demo:
num = gr.Number()
text = gr.Textbox()
update = gr.Button(value="update")
def update_values():
return {text: random.choice(string.ascii_lowercase)}
update.click(update_values, inputs=None, outputs=[text])
demo.launch()
```
### Screenshot
You can see that the text displays `['v']` as opposed to just the character "v"

### Logs
```shell
-
```
### System Info
```shell
3.2
```
### Severity
annoying
| 2022-09-12T19:44:24 |
|
gradio-app/gradio | 2,249 | gradio-app__gradio-2249 | [
"2245"
] | 361e461b979e77fc2d9b11b9706683b8ee188b5f | diff --git a/scripts/delete_old_spaces.py b/scripts/delete_old_spaces.py
new file mode 100644
--- /dev/null
+++ b/scripts/delete_old_spaces.py
@@ -0,0 +1,47 @@
+import argparse
+import datetime
+from typing import Optional
+
+from huggingface_hub import HfApi
+
+
+def delete_space(space_id: str, hf_token: str, api_client: Optional[HfApi] = None):
+ api_client = api_client or HfApi()
+ api_client.delete_repo(repo_id=space_id, token=hf_token, repo_type="space")
+
+
+def get_spaces_to_delete(
+ n_days: int, org_name: str, api_client: Optional[HfApi] = None
+):
+ api_client = api_client or HfApi()
+ spaces = api.list_spaces(author=org_name)
+ spaces_to_delete = []
+ for space in spaces:
+ last_modified = api_client.space_info(space.id).lastModified
+ age = (
+ datetime.datetime.now()
+ - datetime.datetime.fromisoformat(last_modified.rsplit(".", 1)[0])
+ ).days
+ if age > n_days:
+ spaces_to_delete.append(space.id)
+ return spaces_to_delete
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Upload a demo to a space")
+ parser.add_argument(
+ "n_days",
+ type=int,
+ help="Spaces older than n_days will be automatically deleted",
+ )
+ parser.add_argument(
+ "org_name", type=str, help="Name of the author/org to search in"
+ )
+ parser.add_argument("hf_token", type=str, help="HF API token")
+ args = parser.parse_args()
+ api = HfApi()
+
+ to_delete = get_spaces_to_delete(args.n_days, args.org_name, api_client=api)
+ for space in to_delete:
+ print(f"Deleting {space}")
+ delete_space(space, args.hf_token, api_client=api)
| Delete PR Spaces automatically
- [x] I have searched to see if a similar issue already exists.
We should delete the spaces created automatically during PR reviews after the PR is merged or after some given amount of time has passed after the PR has been merged. Keeping around the spaces post merge for a bit may help keep a history of the demos in case we realize something is broken.
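A condensed sketch of how the helpers in the patch above fit together for such a cleanup (the org name and age threshold are assumptions):
```python
import datetime
from huggingface_hub import HfApi

api = HfApi()
N_DAYS, ORG = 7, "gradio-pr-deploys"  # hypothetical threshold and org
for space in api.list_spaces(author=ORG):
    last_modified = api.space_info(space.id).lastModified
    age = (datetime.datetime.now()
           - datetime.datetime.fromisoformat(last_modified.rsplit(".", 1)[0])).days
    if age > N_DAYS:
        api.delete_repo(repo_id=space.id, token="<HF_TOKEN>", repo_type="space")
```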
| 2022-09-13T14:57:41 |
||
gradio-app/gradio | 2,250 | gradio-app__gradio-2250 | [
"2160"
] | f43481c18ac6468fbf30bf9a80981b7eab453961 | diff --git a/demo/reverse_audio/run.py b/demo/reverse_audio/run.py
--- a/demo/reverse_audio/run.py
+++ b/demo/reverse_audio/run.py
@@ -14,7 +14,7 @@ def reverse_audio(audio):
inputs="microphone",
outputs="audio",
examples=[
- "https://file-examples.com/storage/fe6d784fb46320d949c245e/2017/11/file_example_MP3_700KB.mp3",
+ "https://samplelib.com/lib/preview/mp3/sample-3s.mp3",
os.path.join(os.path.dirname(__file__), "audio/recording1.wav")
], cache_examples=True)
diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -413,6 +413,7 @@ def __init__(
self.app_id = random.getrandbits(64)
self.temp_dirs = set()
self.title = title
+ self.show_api = True
data = {
"mode": self.mode,
@@ -785,6 +786,7 @@ def get_config_file(self):
"is_space": self.is_space,
"enable_queue": getattr(self, "enable_queue", False), # launch attributes
"show_error": getattr(self, "show_error", False),
+ "show_api": self.show_api,
}
def getLayout(block):
@@ -946,6 +948,7 @@ def launch(
ssl_certfile: Optional[str] = None,
ssl_keyfile_password: Optional[str] = None,
quiet: bool = False,
+ show_api: bool = True,
_frontend: bool = True,
) -> Tuple[FastAPI, str, str]:
"""
@@ -974,6 +977,7 @@ def launch(
ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
ssl_keyfile_password: If a password is provided, will use this with the ssl certificate for https.
quiet: If True, suppresses most print statements.
+ show_api: If True, shows the api docs in the footer of the app. Default True.
Returns:
app: FastAPI app object that is running the demo
local_url: Locally accessible link to the demo
@@ -1000,6 +1004,7 @@ def reverse(text):
self.height = height
self.width = width
self.favicon_path = favicon_path
+ self.show_api = show_api
if enable_queue is not None:
self.enable_queue = enable_queue
warnings.warn(
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -108,6 +108,9 @@ def fake_func():
config = demo.get_config_file()
self.assertTrue(assert_configs_are_equivalent_besides_ids(XRAY_CONFIG, config))
+ assert config["show_api"] is True
+ _ = demo.launch(prevent_thread_lock=True, show_api=False)
+ assert demo.config["show_api"] is False
def test_load_from_config(self):
def update(name):
| Show "view API" button with a helpful message even with no named API routes
It has been pretty confusing for our users when they see some Gradio demos with the "view API" link and others without it. The reason this happens is that when there are no named API routes, we do not show the "view API" link. This is the case in Blocks (by default if there is no `api_name` attached to any event listener), while in Interface, there is always at least one named API.
To avoid this confusion, I would propose that we always show the "view API" link, but if there is no named API then clicking on the "view API" link includes a message that says "this demo has no named API routes" and link to the documentation for named API routes.
Related discussion: https://huggingface.slack.com/archives/C02QNREU24S/p1662068541775709
| 2022-09-13T16:55:06 |
|
gradio-app/gradio | 2,279 | gradio-app__gradio-2279 | [
"2239"
] | 6c33eed9762d108931ca643f4e0d141c89ee5aaf | diff --git a/demo/fake_gan/run.py b/demo/fake_gan/run.py
--- a/demo/fake_gan/run.py
+++ b/demo/fake_gan/run.py
@@ -2,7 +2,6 @@
# python demo/fake_gan/run.py
import os
import random
-import time
import gradio as gr
diff --git a/gradio/examples.py b/gradio/examples.py
--- a/gradio/examples.py
+++ b/gradio/examples.py
@@ -187,6 +187,20 @@ def __init__(
[ex for (ex, keep) in zip(example, input_has_examples) if keep]
for example in self.processed_examples
]
+ if cache_examples:
+ for ex in non_none_examples:
+ if (
+ len([sample for sample in ex if sample is not None])
+ != self.inputs_with_examples
+ ):
+ warnings.warn(
+ "Examples are being cached but not all input components have "
+ "example values. This may result in an exception being thrown by "
+ "your function. If you do get an error while caching examples, make "
+ "sure all of your inputs have example values for all of your examples "
+ "or you provide default values for those particular parameters in your function."
+ )
+ break
self.dataset = Dataset(
components=inputs_with_examples,
| diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -1,4 +1,5 @@
import os
+from unittest.mock import patch
import pytest
@@ -89,3 +90,62 @@ async def test_caching(self):
prediction = await io.examples_handler.load_from_cache(1)
io.close()
assert prediction[0] == "Hello Dunya"
+
+
+def test_raise_helpful_error_message_if_providing_partial_examples(tmp_path):
+ def foo(a, b):
+ return a + b
+
+ with patch("gradio.examples.CACHED_FOLDER", tmp_path):
+ with pytest.warns(
+ UserWarning,
+ match="^Examples are being cached but not all input components have",
+ ):
+ with pytest.raises(Exception):
+ gr.Interface(
+ foo,
+ inputs=["text", "text"],
+ outputs=["text"],
+ examples=[["foo"], ["bar"]],
+ cache_examples=True,
+ )
+
+ with pytest.warns(
+ UserWarning,
+ match="^Examples are being cached but not all input components have",
+ ):
+ with pytest.raises(Exception):
+ gr.Interface(
+ foo,
+ inputs=["text", "text"],
+ outputs=["text"],
+ examples=[["foo", "bar"], ["bar", None]],
+ cache_examples=True,
+ )
+
+ def foo_no_exception(a, b=2):
+ return a * b
+
+ gr.Interface(
+ foo_no_exception,
+ inputs=["text", "number"],
+ outputs=["text"],
+ examples=[["foo"], ["bar"]],
+ cache_examples=True,
+ )
+
+ def many_missing(a, b, c):
+ return a * b
+
+ with pytest.warns(
+ UserWarning,
+ match="^Examples are being cached but not all input components have",
+ ):
+ with pytest.raises(Exception):
+ gr.Interface(
+ many_missing,
+ inputs=["text", "number", "number"],
+ outputs=["text"],
+ examples=[["foo", None, None], ["bar", 2, 3]],
+ cache_examples=True,
+ )
| Interfaces which provide partial examples will crash on Spaces
### Describe the bug
If you define an interface whose examples only cover some of the inputs, the Interface will crash on start-up when run on Spaces, since `cache_examples=True` by default there but not all inputs have example values.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import os
import gradio as gr
os.environ['SYSTEM'] = "spaces"
def repeat_text(text, times):
return text * times
gr.Interface(repeat_text,
inputs=[gr.Textbox(), gr.Number(precision=0)],
outputs=gr.Textbox(),
examples=[["hello"], ["world"]]).launch()
```
Then try to run it
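One way to avoid the crash while keeping partial examples (in line with the warning added in the patch above) is to give the un-exampled parameter a default value; a sketch:
```python
def repeat_text(text, times=2):  # the default value is an assumption; any sensible one works
    return text * times
```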
### Screenshot
-
### Logs
```shell
Caching examples at: '/Users/freddy/sources/scratch/gradio_cached_examples/16/log.csv'
Traceback (most recent call last):
File "multiplier.py", line 9, in <module>
gr.Interface(repeat_text,
File "/Users/freddy/sources/gradio/gradio/interface.py", line 626, in __init__
self.examples_handler = Examples(
File "/Users/freddy/sources/gradio/gradio/examples.py", line 56, in create_examples
utils.synchronize_async(examples_obj.create)
File "/Users/freddy/sources/gradio/gradio/utils.py", line 364, in synchronize_async
return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)
File "/Users/freddy/miniconda3/envs/gradio/lib/python3.8/site-packages/fsspec/asyn.py", line 96, in sync
raise return_result
File "/Users/freddy/miniconda3/envs/gradio/lib/python3.8/site-packages/fsspec/asyn.py", line 53, in _runner
result[0] = await coro
File "/Users/freddy/sources/gradio/gradio/examples.py", line 208, in create
await self.cache_interface_examples()
File "/Users/freddy/sources/gradio/gradio/examples.py", line 246, in cache_interface_examples
prediction = await self.predict_example(example_id)
File "/Users/freddy/sources/gradio/gradio/examples.py", line 263, in predict_example
predictions = await anyio.to_thread.run_sync(self.fn, *processed_input)
File "/Users/freddy/miniconda3/envs/gradio/lib/python3.8/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/Users/freddy/miniconda3/envs/gradio/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/Users/freddy/miniconda3/envs/gradio/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
TypeError: repeat_text() missing 1 required positional argument: 'times'
```
Relevant forum discussion: https://discuss.huggingface.co/t/problem-showing-mp4-examples-on-spaces/22801/8
### System Info
```shell
3.2
```
### Severity
serious, but I can work around it
| 2022-09-15T20:58:28 |
|
gradio-app/gradio | 2,286 | gradio-app__gradio-2286 | [
"2282"
] | 5dcc5617ca78fb5291287fa085f700db97d72e90 | diff --git a/gradio/queue.py b/gradio/queue.py
--- a/gradio/queue.py
+++ b/gradio/queue.py
@@ -36,7 +36,7 @@ def __init__(
self.max_thread_count = concurrency_count
self.data_gathering_start = data_gathering_start
self.update_intervals = update_intervals
- self.active_jobs: List[None | Event] = [None]
+ self.active_jobs: List[None | Event] = [None] * concurrency_count
self.delete_lock = asyncio.Lock()
self.server_path = None
self.duration_history_total = 0
| diff --git a/test/test_queue.py b/test/test_queue.py
--- a/test/test_queue.py
+++ b/test/test_queue.py
@@ -136,6 +136,18 @@ async def test_send_estimation(self, queue: Queue, mock_event: Event):
assert estimation.rank == 2
assert estimation.rank_eta == 15
+ @pytest.mark.asyncio
+ async def queue_sets_concurrency_count(self):
+ queue_object = Queue(
+ live_updates=True,
+ concurrency_count=5,
+ data_gathering_start=1,
+ update_intervals=1,
+ max_size=None,
+ )
+ assert len(queue_object.active_jobs) == 5
+ queue_object.close()
+
class TestQueueProcessEvents:
@pytest.mark.asyncio
| `concurrency_count` broke on 3.3
### Describe the bug
`concurrency_count` is not working on version 3.3. Requests just get added to the queue instead of the queue being executed in parallel
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Any app launched with `concurrency_count` doesn't run requests in parallel on 3.3. It works on 3.1.7 and 3.2
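A minimal way to exercise the setting, assuming the usual `queue()` entry point:
```python
import time
import gradio as gr

def slow_echo(x):
    time.sleep(5)
    return x

demo = gr.Interface(slow_echo, "text", "text")
demo.queue(concurrency_count=5)  # with the bug, concurrent requests still run one at a time
demo.launch()
```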
### Screenshot
_No response_
### Logs
```shell
No logs
```
### System Info
```shell
3.3+
```
### Severity
blocking upgrade to latest gradio version
| Yikes, we'll take a look. Is there code / a Space you can point me to make reproducing faster?
Because it is broken, I have not put it on any live Spaces, but I think you can reproduce it very minimally by taking any demo and adding `concurrency_count`; the requests still run in sequence
I made a Space with a minimal demo just to exemplify: https://huggingface.co/spaces/multimodalart/saymyname | 2022-09-16T22:12:53 |
gradio-app/gradio | 2,306 | gradio-app__gradio-2306 | [
"1718"
] | 40febc35848290074c2d5525a333b480b0094c42 | diff --git a/scripts/format_release_notes.py b/scripts/format_release_notes.py
new file mode 100644
--- /dev/null
+++ b/scripts/format_release_notes.py
@@ -0,0 +1,50 @@
+import shutil
+import pathlib
+import argparse
+import textwrap
+
+current_dir = (pathlib.Path(__file__).parent / "..").resolve()
+
+TEMPLATE = """# Upcoming Release
+
+## New Features:
+No changes to highlight.
+
+## Bug Fixes:
+No changes to highlight.
+
+## Documentation Changes:
+No changes to highlight.
+
+## Testing and Infrastructure Changes:
+No changes to highlight.
+
+## Breaking Changes:
+No changes to highlight.
+
+## Full Changelog:
+No changes to highlight.
+
+## Contributors Shoutout:
+No changes to highlight.
+
+
+"""
+
+
+def format_release_notes(latest_version: str):
+ upcoming = current_dir / "CHANGELOG.md"
+ with open(upcoming, "r") as latest:
+ lines = latest.readlines()
+ assert lines[0] == "# Upcoming Release \n"
+ with open(upcoming, "w") as latest:
+ lines[0] = latest_version.replace("v", "# Version ") + "\n"
+ lines = textwrap.dedent(TEMPLATE).splitlines(keepends=True) + lines
+ latest.writelines(lines)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Upload a demo to a space")
+ parser.add_argument("latest_version", type=str, help="Name of demo to upload")
+ args = parser.parse_args()
+ format_release_notes(args.latest_version)
diff --git a/website/homepage/build.py b/website/homepage/build.py
--- a/website/homepage/build.py
+++ b/website/homepage/build.py
@@ -1,7 +1,7 @@
import os
import shutil
import jinja2
-from src import index, guides, docs, demos
+from src import index, guides, docs, demos, changelog
SRC_DIR = "src"
BUILD_DIR = "build"
@@ -17,4 +17,5 @@
guides.build(BUILD_DIR, jinja_env)
docs.build(BUILD_DIR, jinja_env)
demos.build(BUILD_DIR, jinja_env)
+changelog.build(BUILD_DIR, jinja_env)
diff --git a/website/homepage/src/changelog/__init__.py b/website/homepage/src/changelog/__init__.py
new file mode 100644
--- /dev/null
+++ b/website/homepage/src/changelog/__init__.py
@@ -0,0 +1,55 @@
+import os
+import markdown2
+import shutil
+import re
+
+DIR = os.path.dirname(__file__)
+INNER_TEMPLATE_FILE = os.path.join(DIR, "inner_template.html")
+CHANGELOG_FILE = os.path.join(DIR, "..", "..", "..", "..", "CHANGELOG.md")
+
+def render_md():
+ with open(CHANGELOG_FILE, "r") as change_file:
+ content = change_file.read()
+
+ # replace code blocks correctly
+ content = re.sub(
+ r"```([a-z]+)\n",
+ lambda x: f"<div class='codeblock'><pre><code class='lang-{x.group(1)}'>",
+ content,
+ )
+ content = re.sub(r"```", "</code></pre></div>", content)
+
+ # remove empty/unused sections
+ content = re.sub(r"## [\w^:\n ]*No changes to highlight.", "", content)
+
+ # get versions and their correct href
+ versions = re.findall(r"# Version \d\.\d[^\n ]*", content)
+ versions = [("Upcoming Release", "upcoming-release")] + [("v" + v.strip("# Version "), "version-" + v.strip("# Version ").replace('.','')) for v in versions]
+
+
+ content_html = markdown2.markdown(
+ content,
+ extras=[
+ "target-blank-links",
+ "header-ids",
+ "tables",
+ "fenced-code-blocks",
+ ],
+ )
+
+ with open(INNER_TEMPLATE_FILE, "w+") as temp_html:
+ temp_html.write(content_html)
+
+ return versions
+
+
+def build(output_dir, jinja_env):
+ versions = render_md()
+ os.makedirs(output_dir, exist_ok=True)
+ template = jinja_env.get_template("changelog/parent_template.html")
+ output = template.render(versions=versions)
+ output_folder = os.path.join(output_dir, "changelog")
+ os.makedirs(output_folder)
+ output_file = os.path.join(output_folder, "index.html")
+ with open(output_file, "w") as index_html:
+ index_html.write(output)
| Add release notes for every release
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a gradio user, it is unclear to me what changes were made in between gradio releases.
**Describe the solution you'd like**
I would like to see release notes for every release on github and or the gradio.app website so I can determine if I should upgrade to a new version of gradio.
**Additional context**
Related to #1646
| cc @aliabd
@pngwn I recall you said something about not liking automated release notes?
I haven't used them before, but I think GitHub's [automated release notes](https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes) feature could be a quick way to start providing more info to users regarding changes.
And it seems to integrate nicely with the action we're using to create the GitHub release. All it seems we have to do is start labelling our PRs with what kind of change they are.
Curious on your thoughts, or anyone else's! | 2022-09-20T19:48:33 |
|
gradio-app/gradio | 2,434 | gradio-app__gradio-2434 | [
"2417"
] | ad2c0790b4164def8a974bdd012903d1647bcf8c | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -3362,90 +3362,20 @@ def serialize(self, x: Any, load_dir: str = "", called_directly: bool = False):
return files
-class Carousel(IOComponent, Changeable):
+class Carousel(IOComponent, Changeable, SimpleSerializable):
"""
- Component displays a set of output components that can be scrolled through.
- Output type: List[List[Any]]
+ Deprecated Component
"""
def __init__(
self,
- *,
- components: Component | List[Component],
- label: Optional[str] = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: Optional[str] = None,
+ *args,
**kwargs,
):
- """
- Parameters:
- components: Classes of component(s) that will be scrolled through.
- label: component name in interface.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- warnings.warn(
- "The Carousel component is partially deprecated. It may not behave as expected.",
+ raise DeprecationWarning(
+ "The Carousel component is deprecated. Please consider using the Gallery "
+ "component, which can be used to display images (and optional captions).",
)
- if not isinstance(components, list):
- components = [components]
- self.components = [
- get_component_instance(component) for component in components
- ]
- IOComponent.__init__(
- self,
- label=label,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "components": [component.get_config() for component in self.components],
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Optional[Any] = _Keywords.NO_VALUE,
- label: Optional[str] = None,
- show_label: Optional[bool] = None,
- visible: Optional[bool] = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(self, y: List[List[Any]]) -> List[List[Any]]:
- """
- Parameters:
- y: carousel output
- Returns:
- 2D array, where each sublist represents one set of outputs or 'slide' in the carousel
- """
- if y is None:
- return None
- if isinstance(y, list):
- if len(y) != 0 and not isinstance(y[0], list):
- y = [[z] for z in y]
- output = []
- for row in y:
- output_row = []
- for i, cell in enumerate(row):
- output_row.append(self.components[i].postprocess(cell))
- output.append(output_row)
- return output
- else:
- raise ValueError("Unknown type. Please provide a list for the Carousel.")
@document("change", "style")
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1732,6 +1732,23 @@ def test_static(self):
self.assertEqual(component.get_config().get("value"), "#000000")
+class TestCarousel:
+ def test_deprecation(self):
+ test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ with pytest.raises(DeprecationWarning):
+ gr.Carousel([pathlib.Path(test_file_dir, "bus.png")])
+
+ def test_deprecation_in_interface(self):
+ with pytest.raises(DeprecationWarning):
+ gr.Interface(lambda x: ["lion.jpg"], "textbox", "carousel")
+
+ def test_deprecation_in_blocks(self):
+ with pytest.raises(DeprecationWarning):
+ with gr.Blocks():
+ gr.Textbox()
+ gr.Carousel()
+
+
class TestGallery:
@patch("uuid.uuid4", return_value="my-uuid")
def test_gallery(self, mock_uuid):
| TypeError: Can't instantiate abstract class Carousel with abstract methods deserialize, serialize
### Describe the bug
https://huggingface.co/spaces/marcelcastrobr/CLIP-image-search/blob/main/app.py
outputs=gr.outputs.Carousel(gr.outputs.Image(type="pil"))
error:
TypeError: Can't instantiate abstract class Carousel with abstract methods deserialize, serialize
what is the problem?
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://huggingface.co/spaces/marcelcastrobr/CLIP-image-search/blob/main/app.py
### Screenshot
_No response_
### Logs
```shell
TypeError: Can't instantiate abstract class Carousel with abstract methods deserialize, serialize
```
### System Info
```shell
3.4.1
```
### Severity
annoying
| Hi @joytianya the `Carousel` class is basically deprecated (see #1252). We'll formally deprecate it soon.
What kind of data were you showing in the `Carousel`? If it was images, please consider using the `Gallery` component instead.
yes, it was images, thank you~
and for Gallery, can it be displayed in pagination?
No the `Gallery` component does not support pagination, but it can be configured to show different numbers of images per row.
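A sketch of the suggested replacement (the `style(grid=...)` call is my assumption about how images-per-row is configured):
```python
import gradio as gr

def search(query):
    # stand-in for a real retrieval step; return a list of image paths or PIL images
    return ["result1.png", "result2.png"]  # hypothetical paths

demo = gr.Interface(search, gr.Textbox(), gr.Gallery(label="results").style(grid=4))
demo.launch()
```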
ok, Is there any way to support the pagination? | 2022-10-11T22:45:23 |
gradio-app/gradio | 2,475 | gradio-app__gradio-2475 | [
"1804"
] | f79a76ca8f6e081310c01c7cd8b2b3c13eb0c0b0 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1640,25 +1640,26 @@ def preprocess(self, x: Dict[str, str] | None) -> str | None:
file_data, file_path=file_name
)
- file_name = file.name
- uploaded_format = file_name.split(".")[-1].lower()
-
- if self.format is not None and uploaded_format != self.format:
- output_file_name = file_name[0 : file_name.rindex(".") + 1] + self.format
- ff = FFmpeg(inputs={file_name: None}, outputs={output_file_name: None})
- ff.run()
- return output_file_name
- elif self.source == "webcam" and self.mirror_webcam is True:
- path = Path(file_name)
- output_file_name = str(path.with_stem(f"{path.stem}_flip"))
+ file_name = Path(file.name)
+ uploaded_format = file_name.suffix.replace(".", "")
+
+ modify_format = self.format is not None and uploaded_format != self.format
+ flip = self.source == "webcam" and self.mirror_webcam
+ if modify_format or flip:
+ format = f".{self.format if modify_format else uploaded_format}"
+ output_options = ["-vf", "hflip", "-c:a", "copy"] if flip else None
+ flip_suffix = "_flip" if flip else ""
+ output_file_name = str(
+ file_name.with_name(f"{file_name.stem}{flip_suffix}{format}")
+ )
ff = FFmpeg(
- inputs={file_name: None},
- outputs={output_file_name: ["-vf", "hflip", "-c:a", "copy"]},
+ inputs={str(file_name): None},
+ outputs={output_file_name: output_options},
)
ff.run()
return output_file_name
else:
- return file_name
+ return str(file_name)
def generate_sample(self):
"""Generates a random video for testing the API."""
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1234,7 +1234,9 @@ def test_component_functions(self):
x_video["is_example"] = True
self.assertIsNotNone(video_input.preprocess(x_video))
video_input = gr.Video(format="avi")
- self.assertEqual(video_input.preprocess(x_video)[-3:], "avi")
+ output_video = video_input.preprocess(x_video)
+ self.assertEqual(output_video[-3:], "avi")
+ assert "flip" not in output_video
assert filecmp.cmp(
video_input.serialize(x_video["name"])["name"], x_video["name"]
@@ -1291,6 +1293,45 @@ def test_video_postprocess_converts_to_playable_format(self):
)
assert processing_utils.video_is_playable(str(full_path_to_output))
+ @patch("gradio.components.FFmpeg")
+ def test_video_preprocessing_flips_video_for_webcam(self, mock_ffmpeg):
+ x_video = deepcopy(media_data.BASE64_VIDEO)
+ video_input = gr.Video(source="webcam")
+ _ = video_input.preprocess(x_video)
+
+ # Dict mapping filename to FFmpeg options
+ output_params = mock_ffmpeg.call_args_list[0][1]["outputs"]
+ assert "hflip" in list(output_params.values())[0]
+ assert "flip" in list(output_params.keys())[0]
+
+ mock_ffmpeg.reset_mock()
+ _ = gr.Video(source="webcam", mirror_webcam=False).preprocess(x_video)
+ mock_ffmpeg.assert_not_called()
+
+ mock_ffmpeg.reset_mock()
+ _ = gr.Video(source="upload", format="mp4").preprocess(x_video)
+ mock_ffmpeg.assert_not_called()
+
+ mock_ffmpeg.reset_mock()
+ output_file = gr.Video(
+ source="webcam", mirror_webcam=True, format="avi"
+ ).preprocess(x_video)
+ output_params = mock_ffmpeg.call_args_list[0][1]["outputs"]
+ assert "hflip" in list(output_params.values())[0]
+ assert "flip" in list(output_params.keys())[0]
+ assert ".avi" in list(output_params.keys())[0]
+ assert ".avi" in output_file
+
+ mock_ffmpeg.reset_mock()
+ output_file = gr.Video(
+ source="webcam", mirror_webcam=False, format="avi"
+ ).preprocess(x_video)
+ output_params = mock_ffmpeg.call_args_list[0][1]["outputs"]
+ assert list(output_params.values())[0] is None
+ assert "flip" not in list(output_params.keys())[0]
+ assert ".avi" in list(output_params.keys())[0]
+ assert ".avi" in output_file
+
class TestTimeseries(unittest.TestCase):
def test_component_functions(self):
| Video mirroring
### Describe the bug
Upload a video, and the output video is a mirror image.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
def video_test(video):
return video
inputs_video_p5 = gr.Video(format="mp4", source="upload", label="Original video") # webcam
outputs_webcam_p5 = gr.Video(format='mp4', label="Detected video")
gyd = gr.Interface(
fn=video_test,
inputs=[inputs_video_p5],
outputs=[outputs_webcam_p5],
)
gyd.launch(
inbrowser=True,
)
```
### Screenshot

### Logs
```shell
see image above
```
### System Info
```shell
gradio 3.0.25
ubuntu 20.04
chrome
```
### Severity
annoying
| Hi @Zengyf-CVer yes this is the default behavior of the webcam component, but can be changed by setting `mirror_webcam=False` in the `Video()` constructor
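For illustration, a minimal sketch of turning the mirroring off with that parameter (the identity function is just a stand-in for the real processing):
```python
import gradio as gr

def video_identity(video):
    return video  # stand-in for the actual detection/processing code

demo = gr.Interface(
    video_identity,
    gr.Video(source="webcam", mirror_webcam=False),  # no horizontal flip
    gr.Video(),
)
demo.launch()
```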
How do we enable the following behavior?
- if the video is from webcam, mirror it
- if the video is selected from example, do not mirror it
Thanks @mistycheney, that seems like reasonable default behavior to me, what do you think @pngwn?
Yeah, that really should be the default, i kinda thought it was. Current behaviour feels like a bug.
I can't reproduce this on `main` with the example code. I can also confirm that only webcam videos are mirrored nothing else.
https://user-images.githubusercontent.com/12937446/195094030-32702d67-2459-4a7b-bc63-3639c86c201f.mov
We have had multiple reports of this now though, so I'm not sure what is happening.
For what it's worth, I do get the flipping behavior running on Windows with ffmpeg version 4.4
<img width="961" alt="image" src="https://user-images.githubusercontent.com/1778297/195966062-9dc8e144-2ccb-4ffd-80a9-e14d8010397b.png">
If someone who can reproduce this can pick it up, that would be good. I'm not seeing this behaviour. Not sure why.
The problem will be in the backend (in the pre/postprocessor) if it exists.
I think I know what's happening. Will put up a PR today and tag you @pngwn !
How do you debug these mysterious issues @freddyaboulton 👀 | 2022-10-17T17:11:49 |
gradio-app/gradio | 2,477 | gradio-app__gradio-2477 | [
"2427"
] | 8a34a799d67ca28f4035f220ee09268f7582b42e | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -461,7 +461,8 @@ def read_main():
app = gr.mount_gradio_app(app, io, path="/gradio")
# Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.
"""
-
+ blocks.dev_mode = False
+ blocks.config = blocks.get_config_file()
gradio_app = App.create_app(blocks)
@app.on_event("startup")
| diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -6,10 +6,12 @@
from unittest.mock import patch
import pytest
+import starlette.routing
import websockets
from fastapi import FastAPI
from fastapi.testclient import TestClient
+import gradio
from gradio import Blocks, Interface, Textbox, close_all, routes
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -260,5 +262,22 @@ def test_get_server_url_from_ws_url(ws_url, answer):
assert routes.get_server_url_from_ws_url(ws_url) == answer
+def test_mount_gradio_app_set_dev_mode_false():
+ app = FastAPI()
+
+ @app.get("/")
+ def read_main():
+ return {"message": "Hello!"}
+
+ with gradio.Blocks() as blocks:
+ gradio.Textbox("Hello from gradio!")
+
+ app = routes.mount_gradio_app(app, blocks, path="/gradio")
+ gradio_fast_api = next(
+ route for route in app.routes if isinstance(route, starlette.routing.Mount)
+ )
+ assert not gradio_fast_api.app.blocks.dev_mode
+
+
if __name__ == "__main__":
unittest.main()
| mount_gradio_app causing reload loop
### Describe the bug
Hello. I'm trying to run some apps using mount_gradio_app. When I go to that page it loads perfectly, but is stuck in a reload loop. In the network tab of the console the `/app_id` call is being made every ~500ms, it succeeds, gets the same ID returned, but keeps going.
I have a simple page showing at `/` (`read_main`) and this works correctly. It is only the sub-APIs that have the reload-loop issue.
In the example below, `/` has no issues, but `/words` (and any other sub-API I try) gets caught in this loop.
Any help is greatly appreciated. This is for an internal demo that won't be accessible on the internet (but will be hosted in k8s on an internal VPN), so it doesn't need to be fully production-ready.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Here's a simple version:
main.py:
```
#!/usr/bin/env python3
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
import gradio as gr
from utils.demos import Demos
import sys
app = FastAPI()
@app.get("/", response_class=HTMLResponse)
def read_main():
return """
<html>
<head>
<title>Demo</title>
</head>
<body>
<div>Demo</div>
</body>
</html>
"""
io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
app = gr.mount_gradio_app(app, demos.get_words_layout(), path="/words")
```
`Dockerfile`
```
# Base Image
FROM python:latest
# set default environment variables
ENV PYTHONUNBUFFERED 1
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
tesseract-ocr \
libtesseract-dev \
nginx \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# install environment dependencies
COPY ./demos/requirements.txt ./
RUN pip3 install --upgrade pip
RUN pip install -r requirements.txt
RUN ["sh", "-c", "python -m spacy download en_core_web_lg"]
# create and set working directory
RUN mkdir /demos
WORKDIR /demos
COPY ./demos ./
EXPOSE 80
EXPOSE 3000
CMD ["sh", "-c", "service nginx start;uvicorn main:app"]
```
nginx
```
server {
listen 80 default_server;
server_name myserver;
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header Host $host;
}
}
```
I'm running the app now with `uvicorn main:app` but I've tried lots of options, tried it programmatically with reload set explicitly to false, and through Gunicorn and a socket. None of these solve the problem.
### Screenshot

### Logs
```shell
Me -> ~/Development/demo -> docker run -p 80:80 -p 3000:3000 -it demo
Starting nginx: nginx.
root@9a8279a9c6cd:/demos# uvicorn main:app
2022-10-11 09:30:54,705 - utils.utils - INFO - log level: INFO
INFO: Started server process [34]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:52834 - "GET / HTTP/1.0" 200 OK
INFO: 127.0.0.1:52836 - "GET /assets/index.e0c646e0.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52838 - "GET /assets/index.3364fc9e.css HTTP/1.0" 200 OK
INFO: 127.0.0.1:52840 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52842 - "GET /assets/index.da18b6d7.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52850 - "GET /assets/index.cc2d8162.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52854 - "GET /assets/index.7dc02e74.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52856 - "GET /assets/index.5fe10dd6.css HTTP/1.0" 200 OK
INFO: 127.0.0.1:52858 - "GET /assets/index.950ac012.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52862 - "GET /assets/styles.ed3b21b5.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52860 - "GET /assets/Column.6bd7c762.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52868 - "GET /assets/Image.1afdf52a.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52864 - "GET /assets/index.2082796c.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52866 - "GET /assets/BlockLabel.0f912175.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52874 - "GET /assets/Block.19cdfe29.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52872 - "GET /assets/Webcam.955b3879.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52870 - "GET /assets/Upload.c52ab777.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52876 - "GET /assets/ModifyUpload.50f1ed7a.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52878 - "GET /assets/Image.70947e40.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52880 - "GET /assets/index.77f8571c.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52884 - "GET /assets/index.adf51434.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52882 - "GET /assets/csv.27f5436c.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52886 - "GET /assets/dsv.7fe76a93.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52888 - "GET /assets/Model3D.b527a682.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52890 - "GET /assets/logo.1a7cafda.svg HTTP/1.0" 200 OK
INFO: 127.0.0.1:52892 - "GET /assets/index.7bbf77fc.js HTTP/1.0" 200 OK
INFO: 127.0.0.1:52896 - "GET /file%3D/demos/utils/Samples/somethign.jpg HTTP/1.0" 200 OK
INFO: 127.0.0.1:52894 - "GET /file%3D/demos/utils/Samples/image-text.jpg HTTP/1.0" 200 OK
INFO: 127.0.0.1:52898 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52900 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52902 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52904 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52906 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52908 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52910 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52912 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52914 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52916 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52918 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52920 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52922 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52924 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52926 - "GET /app_id HTTP/1.0" 200 OK
INFO: 127.0.0.1:52928 - "GET /app_id HTTP/1.0" 200 OK
...continues forever
with trace:
TRACE: 127.0.0.1:53350 - ASGI [45] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53350 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53350 - ASGI [45] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53350 - ASGI [45] Completed
TRACE: 127.0.0.1:53350 - HTTP connection lost
TRACE: 127.0.0.1:53352 - HTTP connection made
TRACE: 127.0.0.1:53352 - ASGI [46] Started scope={'type': 'http', 'asgi': {'version': '3.0', 'spec_version': '2.3'}, 'http_version': '1.0', 'server': ('127.0.0.1', 8000), 'client': ('127.0.0.1', 53352), 'scheme': 'http', 'method': 'GET', 'root_path': '', 'path': '/words/app_id', 'raw_path': b'/words/app_id', 'query_string': b'', 'headers': '<...>'}
TRACE: 127.0.0.1:53352 - ASGI [46] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53352 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53352 - ASGI [46] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53352 - ASGI [46] Completed
TRACE: 127.0.0.1:53352 - HTTP connection lost
TRACE: 127.0.0.1:53354 - HTTP connection made
TRACE: 127.0.0.1:53354 - ASGI [47] Started scope={'type': 'http', 'asgi': {'version': '3.0', 'spec_version': '2.3'}, 'http_version': '1.0', 'server': ('127.0.0.1', 8000), 'client': ('127.0.0.1', 53354), 'scheme': 'http', 'method': 'GET', 'root_path': '', 'path': '/words/app_id', 'raw_path': b'/words/app_id', 'query_string': b'', 'headers': '<...>'}
TRACE: 127.0.0.1:53354 - ASGI [47] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53354 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53354 - ASGI [47] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53354 - ASGI [47] Completed
TRACE: 127.0.0.1:53354 - HTTP connection lost
TRACE: 127.0.0.1:53356 - HTTP connection made
TRACE: 127.0.0.1:53356 - ASGI [48] Started scope={'type': 'http', 'asgi': {'version': '3.0', 'spec_version': '2.3'}, 'http_version': '1.0', 'server': ('127.0.0.1', 8000), 'client': ('127.0.0.1', 53356), 'scheme': 'http', 'method': 'GET', 'root_path': '', 'path': '/words/app_id', 'raw_path': b'/words/app_id', 'query_string': b'', 'headers': '<...>'}
TRACE: 127.0.0.1:53356 - ASGI [48] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53356 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53356 - ASGI [48] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53356 - ASGI [48] Completed
TRACE: 127.0.0.1:53356 - HTTP connection lost
TRACE: 127.0.0.1:53358 - HTTP connection made
TRACE: 127.0.0.1:53358 - ASGI [49] Started scope={'type': 'http', 'asgi': {'version': '3.0', 'spec_version': '2.3'}, 'http_version': '1.0', 'server': ('127.0.0.1', 8000), 'client': ('127.0.0.1', 53358), 'scheme': 'http', 'method': 'GET', 'root_path': '', 'path': '/words/app_id', 'raw_path': b'/words/app_id', 'query_string': b'', 'headers': '<...>'}
TRACE: 127.0.0.1:53358 - ASGI [49] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53358 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53358 - ASGI [49] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53358 - ASGI [49] Completed
TRACE: 127.0.0.1:53358 - HTTP connection lost
TRACE: 127.0.0.1:53360 - HTTP connection made
TRACE: 127.0.0.1:53360 - ASGI [50] Started scope={'type': 'http', 'asgi': {'version': '3.0', 'spec_version': '2.3'}, 'http_version': '1.0', 'server': ('127.0.0.1', 8000), 'client': ('127.0.0.1', 53360), 'scheme': 'http', 'method': 'GET', 'root_path': '', 'path': '/words/app_id', 'raw_path': b'/words/app_id', 'query_string': b'', 'headers': '<...>'}
TRACE: 127.0.0.1:53360 - ASGI [50] Send {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: 127.0.0.1:53360 - "GET /app_id HTTP/1.0" 200 OK
TRACE: 127.0.0.1:53360 - ASGI [50] Send {'type': 'http.response.body', 'body': '<30 bytes>'}
TRACE: 127.0.0.1:53360 - ASGI [50] Completed
TRACE: 127.0.0.1:53360 - HTTP connection lost
^CINFO: Shutting down
INFO: Waiting for application shutdown.
TRACE: ASGI [1] Receive {'type': 'lifespan.shutdown'}
TRACE: ASGI [1] Send {'type': 'lifespan.shutdown.complete'}
TRACE: ASGI [1] Completed
INFO: Application shutdown complete.
INFO: Finished server process [53]
```
```
### System Info
```shell
Docker running on Mac, Dockerfile included above.
```
### Severity
blocking all usage of gradio
| It also does this whenever you run `gradio run.py` ('dev' mode), which makes debugging network requests difficult. It doesn't cause any errors, but it does impair the developer experience. It also doesn't reliably reload the page, so I'm not sure how useful it actually is.
The dev mode functionality should be implemented with websockets rather than polling in this manner.
To close this issue, I think we need to make sure `mount_gradio_app` sets the dev mode to False (currently set to True by default and set to [False](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L468) on `launch`).
Reimplementing dev mode to not reload the page should be left to another issue.
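For reference, a minimal sketch of the mounting pattern this fix targets (assuming the fix simply marks the mounted Blocks as not being in dev mode, so the page served at `/gradio` stops polling `/app_id` for reloads):
```python
from fastapi import FastAPI
import gradio as gr

app = FastAPI()

@app.get("/")
def read_main():
    return {"message": "Hello!"}

io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
# After the fix, mounting disables dev mode on the Blocks,
# so the frontend no longer polls /app_id in a loop.
app = gr.mount_gradio_app(app, io, path="/gradio")
```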
Hey @freddyaboulton , thanks, I had just tracked it down when you commented. I agree with the suggested fix, especially since the behaviour is bizarre and difficult to track down. For now, some updates to the docs might also be helpful. Thanks again! | 2022-10-17T21:39:06 |
gradio-app/gradio | 2,493 | gradio-app__gradio-2493 | [
"2452"
] | 943cdd5f353c33103afd6e401cbc495935bff287 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -481,6 +481,7 @@ def __init__(
self.share_url = None
self.width = None
self.height = None
+ self.api_open = True
self.ip_address = None
self.is_space = True if os.getenv("SYSTEM") == "spaces" else False
@@ -1029,6 +1030,7 @@ def queue(
status_update_rate: float | str = "auto",
client_position_to_load_data: int = 30,
default_enabled: bool = True,
+ api_open: bool = True,
max_size: Optional[int] = None,
):
"""
@@ -1038,13 +1040,15 @@ def queue(
status_update_rate: If "auto", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.
client_position_to_load_data: Once a client's position in Queue is less that this value, the Queue will collect the input data from the client. You may make this smaller if clients can send large volumes of data, such as video, since the queued data is stored in memory.
default_enabled: If True, all event listeners will use queueing by default.
- max_size: Maximum number of jobs that can be queued at once. Jobs beyond this limit simply return an error message to the user asking them to try again. If None, there is no limit.
+ api_open: If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.
+ max_size: The maximum number of events the queue will store at any given moment.
Example:
demo = gr.Interface(gr.Textbox(), gr.Image(), image_generator)
demo.queue(concurrency_count=3)
demo.launch()
"""
self.enable_queue = default_enabled
+ self.api_open = api_open
self._queue = queue.Queue(
live_updates=status_update_rate == "auto",
concurrency_count=concurrency_count,
@@ -1108,7 +1112,7 @@ def launch(
ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
ssl_keyfile_password: If a password is provided, will use this with the ssl certificate for https.
quiet: If True, suppresses most print statements.
- show_api: If True, shows the api docs in the footer of the app. Default True.
+ show_api: If True, shows the api docs in the footer of the app. Default True. If the queue is enabled, then api_open parameter of .queue() will determine if the api docs are shown, independent of the value of show_api.
Returns:
app: FastAPI app object that is running the demo
local_url: Locally accessible link to the demo
@@ -1135,7 +1139,6 @@ def reverse(text):
self.height = height
self.width = width
self.favicon_path = favicon_path
- self.show_api = show_api
if enable_queue is not None:
self.enable_queue = enable_queue
warnings.warn(
@@ -1149,13 +1152,11 @@ def reverse(text):
self.enable_queue = self.enable_queue is True
if self.enable_queue and not hasattr(self, "_queue"):
self.queue()
+ self.show_api = self.api_open if self.enable_queue else show_api
for dep in self.dependencies:
for i in dep["cancels"]:
- queue_status = self.dependencies[i]["queue"]
- if queue_status is False or (
- queue_status is None and not self.enable_queue
- ):
+ if not self.queue_enabled_for_fn(i):
raise ValueError(
"In order to cancel an event, the queue for that event must be enabled! "
"You may get this error by either 1) passing a function that uses the yield keyword "
@@ -1424,3 +1425,8 @@ def startup_events(self):
if self.enable_queue:
utils.run_coro_in_background(self._queue.start)
utils.run_coro_in_background(self.create_limiter)
+
+ def queue_enabled_for_fn(self, fn_index: int):
+ if self.dependencies[fn_index]["queue"] is None:
+ return self.enable_queue
+ return self.dependencies[fn_index]["queue"]
diff --git a/gradio/queue.py b/gradio/queue.py
--- a/gradio/queue.py
+++ b/gradio/queue.py
@@ -63,6 +63,7 @@ def __init__(
self.sleep_when_free = 0.05
self.max_size = max_size
self.blocks_dependencies = blocks_dependencies
+ self.access_token = ""
async def start(self):
run_coro_in_background(self.start_processing)
@@ -78,6 +79,9 @@ def resume(self):
def set_url(self, url: str):
self.server_path = url
+ def set_access_token(self, token: str):
+ self.access_token = token
+
def get_active_worker_count(self) -> int:
count = 0
for worker in self.active_jobs:
@@ -258,6 +262,7 @@ async def call_prediction(self, events: List[Event], batch: bool):
method=Request.Method.POST,
url=f"{self.server_path}api/predict",
json=dict(data),
+ headers={"Authorization": f"Bearer {self.access_token}"},
)
return response
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -72,6 +72,7 @@ def __init__(self, **kwargs):
self.state_holder = {}
self.iterators = defaultdict(dict)
self.lock = asyncio.Lock()
+ self.queue_token = secrets.token_urlsafe(32)
super().__init__(**kwargs)
def configure_app(self, blocks: gradio.Blocks) -> None:
@@ -85,6 +86,8 @@ def configure_app(self, blocks: gradio.Blocks) -> None:
self.auth = None
self.blocks = blocks
+ if hasattr(self.blocks, "_queue"):
+ self.blocks._queue.set_access_token(self.queue_token)
self.cwd = os.getcwd()
self.favicon_path = blocks.favicon_path
self.tokens = {}
@@ -303,7 +306,10 @@ async def run_predict(
@app.post("/api/{api_name}", dependencies=[Depends(login_check)])
@app.post("/api/{api_name}/", dependencies=[Depends(login_check)])
async def predict(
- api_name: str, body: PredictBody, username: str = Depends(get_current_user)
+ api_name: str,
+ body: PredictBody,
+ request: Request,
+ username: str = Depends(get_current_user),
):
if body.fn_index is None:
for i, fn in enumerate(app.blocks.dependencies):
@@ -317,6 +323,15 @@ async def predict(
},
status_code=500,
)
+ if not app.blocks.api_open and app.blocks.queue_enabled_for_fn(
+ body.fn_index
+ ):
+ if f"Bearer {app.queue_token}" != request.headers.get("Authorization"):
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="Not authorized to skip the queue",
+ )
+
# If this fn_index cancels jobs, then the only input we need is the
# current session hash
if app.blocks.dependencies[body.fn_index]["cancels"]:
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -17,7 +17,6 @@
import gradio as gr
import gradio.events
-from gradio.blocks import Block
from gradio.exceptions import DuplicateBlockError
from gradio.routes import PredictBody
from gradio.test_data.blocks_configs import XRAY_CONFIG
@@ -788,5 +787,21 @@ def iteration(a):
demo.queue().launch(prevent_thread_lock=True)
+def test_queue_enabled_for_fn():
+ with gr.Blocks() as demo:
+ input = gr.Textbox()
+ output = gr.Textbox()
+ number = gr.Number()
+ button = gr.Button()
+ button.click(lambda x: f"Hello, {x}!", input, output)
+ button.click(lambda: 42, None, number, queue=True)
+
+ assert not demo.queue_enabled_for_fn(0)
+ assert demo.queue_enabled_for_fn(1)
+ demo.queue()
+ assert demo.queue_enabled_for_fn(0)
+ assert demo.queue_enabled_for_fn(1)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -12,7 +12,7 @@
from fastapi.testclient import TestClient
import gradio as gr
-from gradio import Blocks, Interface, Textbox, close_all, routes
+from gradio import Blocks, Button, Interface, Number, Textbox, close_all, routes
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -183,6 +183,7 @@ def generator(string):
response = client.post(
"/api/predict/",
json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
assert output["data"] == ["a"]
@@ -190,6 +191,7 @@ def generator(string):
response = client.post(
"/api/predict/",
json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
assert output["data"] == ["b"]
@@ -197,6 +199,7 @@ def generator(string):
response = client.post(
"/api/predict/",
json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
assert output["data"] == ["c"]
@@ -204,6 +207,7 @@ def generator(string):
response = client.post(
"/api/predict/",
json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
assert output["data"] == [None]
@@ -211,6 +215,7 @@ def generator(string):
response = client.post(
"/api/predict/",
json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
assert output["data"] == ["a"]
@@ -307,5 +312,77 @@ def read_main():
assert not gradio_fast_api.app.blocks.dev_mode
+def test_predict_route_is_blocked_if_api_open_false():
+ io = Interface(lambda x: x, "text", "text", examples=[["freddy"]]).queue(
+ api_open=False
+ )
+ app, _, _ = io.launch(prevent_thread_lock=True)
+ assert not io.show_api
+ client = TestClient(app)
+ result = client.post(
+ "/api/predict", json={"fn_index": 0, "data": [5], "session_hash": "foo"}
+ )
+ assert result.status_code == 401
+
+
+def test_predict_route_not_blocked_if_queue_disabled():
+ with Blocks() as demo:
+ input = Textbox()
+ output = Textbox()
+ number = Number()
+ button = Button()
+ button.click(
+ lambda x: f"Hello, {x}!", input, output, queue=False, api_name="not_blocked"
+ )
+ button.click(lambda: 42, None, number, queue=True, api_name="blocked")
+ app, _, _ = demo.queue(api_open=False).launch(
+ prevent_thread_lock=True, show_api=True
+ )
+ assert not demo.show_api
+ client = TestClient(app)
+
+ result = client.post("/api/blocked", json={"data": [], "session_hash": "foo"})
+ assert result.status_code == 401
+ result = client.post(
+ "/api/not_blocked", json={"data": ["freddy"], "session_hash": "foo"}
+ )
+ assert result.status_code == 200
+ assert result.json()["data"] == ["Hello, freddy!"]
+
+
+def test_predict_route_not_blocked_if_routes_open():
+ with Blocks() as demo:
+ input = Textbox()
+ output = Textbox()
+ button = Button()
+ button.click(
+ lambda x: f"Hello, {x}!", input, output, queue=True, api_name="not_blocked"
+ )
+ app, _, _ = demo.queue(api_open=True).launch(
+ prevent_thread_lock=True, show_api=False
+ )
+ assert demo.show_api
+ client = TestClient(app)
+
+ result = client.post(
+ "/api/not_blocked", json={"data": ["freddy"], "session_hash": "foo"}
+ )
+ assert result.status_code == 200
+ assert result.json()["data"] == ["Hello, freddy!"]
+
+ demo.close()
+ demo.queue(api_open=False).launch(prevent_thread_lock=True, show_api=False)
+ assert not demo.show_api
+
+
+def test_show_api_queue_not_enabled():
+ io = Interface(lambda x: x, "text", "text", examples=[["freddy"]])
+ app, _, _ = io.launch(prevent_thread_lock=True)
+ assert io.show_api
+ io.close()
+ io.launch(prevent_thread_lock=True, show_api=False)
+ assert not io.show_api
+
+
if __name__ == "__main__":
unittest.main()
| API calls do not stack with the queue from the website.
### Describe the bug
I have a website with about 5k DAU, and API calls from another service are made through the API docs. I can see from the console that these API calls from the other service jump the queue and are processed immediately.
Is this the intended behavior or a bug? How can I fix this?
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Call the API while there is a queue on the website: https://ai-creator.net/aiart
I am hosting the Stable Diffusion web UI; I am not sure how you could debug this online.
### Screenshot
_No response_
### Logs
```shell
...
```

You can see there are two jobs, but I have only set concurrency_count=1 in my code.
### System Info
```shell
>>> import gradio as gr
>>> gr.__version__
'3.4b3'
>>>
```
### Severity
serious, but I can work around it
| Hi @xiaol can you please provide a link to your website and/or the code for your demo so that we can try to reproduce this issue?
And one question -- have you enabled the queue for your demo? This should not happen if you have enabled the queue
Sorry for the confusing information; the website is here: https://ai-creator.net/aiart,
and this is the launch code:
```
def run(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
gradio_params = {
'inbrowser': opt.inbrowser,
'server_name': '0.0.0.0',
'server_port': opt.port,
'share': opt.share,
'show_error': True,
'debug':True
}
if not opt.share:
demo.queue(concurrency_count=1, client_position_to_load_data=30)
if opt.share and opt.share_password:
gradio_params['auth'] = ('webui', opt.share_password)
# gradio_params['auth'] = same_auth
# Check to see if Port 7860 is open
port_status = 1
while port_status != 0:
try:
self.demo.launch(**gradio_params)
except (OSError) as e:
print (f'Error: Port: {opt.port} is not open yet. Please wait, this may take upwards of 60 seconds...')
time.sleep(10)
else:
port_status = 0
def stop(self):
self.demo.close() # this tends to hang
def launch_server():
server_thread = ServerLauncher(demo)
server_thread.start()
try:
while server_thread.is_alive():
time.sleep(60)
except (KeyboardInterrupt, OSError) as e:
crash(e, 'Shutting down...')
```
this is the txt2img button code:
```
use_queue = True
txt2img_btn.click(
txt2img_func,
txt2img_inputs,
txt2img_outputs,
api_name='txt2img',
queue=use_queue
)
```
Thanks @xiaol, one more clarification -- when you say "api call from another service through api doc", do you mean they make a POST request to the `/api/predict` endpoint? Would you like to disable this behavior or would like to preserve this behavior but ensure users join the queue?
Yes, they make a POST request to the /api/predict endpoint, and this slows down the online users' queue; it gives API requests a privilege. For some APIs that would be fine, but others should queue together with the website users, since anyone can make an API request. Of course, it makes the online service vulnerable, but I haven't figured out how to deal with it yet.
Because I want to charge for and limit the API, maybe giving those requests a privilege is fine for now.
But ultimately I want to control this behavior so I can give different APIs or users the privilege to jump the queue, if I monetize this product.
And from what I observed, users always join the queue, except when you use a phone browser and switch the browser to the background, which gets your queue stuck; maybe I should post another issue about that problem.
Thank you for your patience with my writing.
Hi @xiaol !
Yes it’s true that the backend api is open when the queue is enabled, allowing users who use the rest api directly to skip the ones in the queue.
One thing that I think we can implement in Gradio is to block all requests to the /api/ endpoint by default if the queue for that particular route is enabled. We can add an 'open_routes' parameter to the queue method so that 'queue(open_routes=True)' means the route is not blocked when the queue is enabled (the current behavior).
That should let you control whether or not you want to prevent API users from skipping the queue.
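For illustration, a sketch of how such a flag could be used (in the accompanying patch the parameter ends up being named `api_open`; the prediction function here is a placeholder):
```python
import gradio as gr

def generate(prompt):
    return "painting of " + prompt  # placeholder for the real model call

demo = gr.Interface(generate, "text", "text")
# Keep the queue for browser users, but stop direct POSTs to /api/predict
# from skipping it:
demo.queue(concurrency_count=1, api_open=False)
demo.launch()
```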
I’m not sure if the “charging for api” usage feature you talk about should live in Gradio. But I think you can implement your own rate limiting functionality as a FastApi middleware https://fastapi.tiangolo.com/tutorial/middleware/
I see, an API switch is fine. Middleware is a good practice, great to know; I'll work with it. | 2022-10-18T20:20:54 |
| 2022-10-18T20:20:54 |
gradio-app/gradio | 2,588 | gradio-app__gradio-2588 | [
"2549"
] | e6cda90b69b078662395a58ccfca9bd00b5912b8 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2023,7 +2023,7 @@ def style(
)
def as_example(self, input_data: str) -> str:
- return Path(input_data).name
+ return Path(input_data).name if input_data else ""
@document("change", "clear", "style")
@@ -2202,8 +2202,10 @@ def style(
**kwargs,
)
- def as_example(self, input_data: str | List) -> str:
- if isinstance(input_data, list):
+ def as_example(self, input_data: str | List | None) -> str | List[str]:
+ if input_data is None:
+ return ""
+ elif isinstance(input_data, list):
return [Path(file).name for file in input_data]
else:
return Path(input_data).name
@@ -3615,7 +3617,7 @@ def style(self, **kwargs):
)
def as_example(self, input_data: str) -> str:
- return Path(input_data).name
+ return Path(input_data).name if input_data else ""
@document("change", "clear")
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1921,10 +1921,11 @@ def test_dataframe_as_example_converts_dataframes():
assert df_comp.as_example(np.array([[1, 2], [3, 4.0]])) == [[1.0, 2.0], [3.0, 4.0]]
[email protected]("component", [gr.Model3D, gr.File])
[email protected]("component", [gr.Model3D, gr.File, gr.Audio])
def test_as_example_returns_file_basename(component):
component = component()
assert component.as_example("/home/freddy/sources/example.ext") == "example.ext"
+ assert component.as_example(None) == ""
@patch("gradio.components.IOComponent.as_example")
| Cannot load examples with missing file paths
### Describe the bug
For the moment it is only possible to either provide no file path in all example rows, either existing file paths for all examples.
This is due to exception raising at line _example[i] = component.as_example(ex)_, in _components.py_, where the File component expects str, bytes or os.PathLike object, not NoneType.
I'd like to have the possibility to add file paths only for some on the examples, as it is easily doable with other components eg Text.
(I'm using Gradio 3.7)
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
def simple_fct(text, file):
return text + 'ok'
demo = gr.Interface(
simple_fct,
['text', 'file'],
['text'],
examples=[['text1', 'main_feng.py'], ['', None]]
)
demo.launch(debug=True)
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File <path>Python310\lib\site-packages\gradio\interface.py", line 651, in __init__
self.examples_handler = Examples(
File <path>Python310\lib\site-packages\gradio\examples.py", line 44, in create_examples
examples_obj = Examples(
File <path>Python310\lib\site-packages\gradio\examples.py", line 215, in __init__
self.dataset = Dataset(
File <path>Python310\lib\site-packages\gradio\components.py", line 3809, in __init__
example[i] = component.as_example(ex)
File <path>Python310\lib\site-packages\gradio\components.py", line 2209, in as_example
return Path(input_data).name
File <path>Python310\lib\pathlib.py", line 960, in __new__
self = cls._from_parts(args)
File <path>Python310\lib\pathlib.py", line 594, in _from_parts
drv, root, parts = self._parse_args(args)
File <path>Python310\lib\pathlib.py", line 578, in _parse_args
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not NoneType
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 24, in <module>
demo = gr.Interface(
File <path>Python310\lib\site-packages\gradio\interface.py", line 432, in __init__
with self:
File <path>Python310\lib\site-packages\gradio\blocks.py", line 971, in __exit__
self.config = self.get_config_file()
File <path>Python310\lib\site-packages\gradio\blocks.py", line 947, in get_config_file
"props": utils.delete_none(block.get_config())
File <path>Python310\lib\site-packages\gradio\components.py", line 3822, in get_config
"headers": self.headers,
AttributeError: 'Dataset' object has no attribute 'headers'
Process finished with exit code 1
```
### System Info
```shell
gradio 3.7 run from PyCharm 2022.2.3 (Community Edition)
```
### Severity
annoying
| Thanks for filing @SoranaBaciu ! | 2022-11-01T15:48:34 |
gradio-app/gradio | 2,639 | gradio-app__gradio-2639 | [
"2636"
] | ebb65eb9eedf416821e5ce733cd69d9ecb8e66e9 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -421,10 +421,11 @@ def postprocess_update_dict(block: Block, update_dict: Dict, postprocess: bool =
update_dict: The original update dictionary
postprocess: Whether to postprocess the "value" key of the update dictionary.
"""
- prediction_value = block.get_specific_update(update_dict)
- if prediction_value.get("value") is components._Keywords.NO_VALUE:
- prediction_value.pop("value")
- prediction_value = delete_none(prediction_value, skip_value=True)
+ if update_dict.get("__type__", "") == "generic_update":
+ update_dict = block.get_specific_update(update_dict)
+ if update_dict.get("value") is components._Keywords.NO_VALUE:
+ update_dict.pop("value")
+ prediction_value = delete_none(update_dict, skip_value=True)
if "value" in prediction_value and postprocess:
prediction_value["value"] = block.postprocess(prediction_value["value"])
return prediction_value
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -391,6 +391,35 @@ def infer(x):
"value": gr.media_data.BASE64_IMAGE,
}
+ @pytest.mark.asyncio
+ async def test_blocks_update_interactive(
+ self,
+ ):
+ def specific_update():
+ return [
+ gr.Image.update(interactive=True),
+ gr.Textbox.update(interactive=True),
+ ]
+
+ def generic_update():
+ return [gr.update(interactive=True), gr.update(interactive=True)]
+
+ with gr.Blocks() as demo:
+ run = gr.Button(value="Make interactive")
+ image = gr.Image()
+ textbox = gr.Text()
+ run.click(specific_update, None, [image, textbox])
+ run.click(generic_update, None, [image, textbox])
+
+ for fn_index in range(2):
+ output = await demo.process_api(fn_index, [])
+ assert output["data"][0] == {
+ "interactive": True,
+ "__type__": "update",
+ "mode": "dynamic",
+ }
+ assert output["data"][1] == {"__type__": "update", "mode": "dynamic"}
+
class TestCallFunction:
@pytest.mark.asyncio
@@ -677,7 +706,22 @@ def test_without_update(self):
def test_with_update(self):
specific_update = gr.Textbox.get_specific_update(
- {"lines": 4, "__type__": "update"}
+ {"lines": 4, "__type__": "update", "interactive": False}
+ )
+ assert specific_update == {
+ "lines": 4,
+ "max_lines": None,
+ "placeholder": None,
+ "label": None,
+ "show_label": None,
+ "visible": None,
+ "value": gr.components._Keywords.NO_VALUE,
+ "__type__": "update",
+ "mode": "static",
+ }
+
+ specific_update = gr.Textbox.get_specific_update(
+ {"lines": 4, "__type__": "update", "interactive": True}
)
assert specific_update == {
"lines": 4,
@@ -688,19 +732,26 @@ def test_with_update(self):
"visible": None,
"value": gr.components._Keywords.NO_VALUE,
"__type__": "update",
+ "mode": "dynamic",
}
def test_with_generic_update(self):
specific_update = gr.Video.get_specific_update(
- {"visible": True, "value": "test.mp4", "__type__": "generic_update"}
+ {
+ "visible": True,
+ "value": "test.mp4",
+ "__type__": "generic_update",
+ "interactive": True,
+ }
)
assert specific_update == {
"source": None,
"label": None,
"show_label": None,
- "interactive": None,
"visible": True,
"value": "test.mp4",
+ "mode": "dynamic",
+ "interactive": True,
"__type__": "update",
}
diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -151,7 +151,10 @@ async def test_caching_with_update(self):
cache_examples=True,
)
prediction = await io.examples_handler.load_from_cache(1)
- assert prediction[0] == {"visible": False, "__type__": "update"}
+ assert prediction[0] == {
+ "visible": False,
+ "__type__": "update",
+ }
@pytest.mark.asyncio
async def test_caching_with_mix_update(self):
@@ -163,7 +166,11 @@ async def test_caching_with_mix_update(self):
cache_examples=True,
)
prediction = await io.examples_handler.load_from_cache(1)
- assert prediction[0] == {"lines": 4, "value": "hello", "__type__": "update"}
+ assert prediction[0] == {
+ "lines": 4,
+ "value": "hello",
+ "__type__": "update",
+ }
@pytest.mark.asyncio
async def test_caching_with_dict(self):
@@ -171,15 +178,18 @@ async def test_caching_with_dict(self):
out = gr.Label()
io = gr.Interface(
- lambda _: {text: gr.update(lines=4), out: "lion"},
+ lambda _: {text: gr.update(lines=4, interactive=False), out: "lion"},
"textbox",
[text, out],
examples=["abc"],
cache_examples=True,
)
prediction = await io.examples_handler.load_from_cache(0)
- assert prediction == [{"lines": 4, "__type__": "update"}, {"label": "lion"}]
assert not any(d["trigger"] == "fake_event" for d in io.config["dependencies"])
+ assert prediction == [
+ {"lines": 4, "__type__": "update", "mode": "static"},
+ {"label": "lion"},
+ ]
def test_raise_helpful_error_message_if_providing_partial_examples(self, tmp_path):
def foo(a, b):
| `gr.Update` doesn't work for the `interactive` property
### Describe the bug
I would like to programmatically toggle whether a field is interactive or not depending on other things that happen in my application; however, the `interactive` property seems not to be working with the `gr.update` function.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
By taking the demo in the docs page and just adding an `interactive=False` to the first choice:
```py
import gradio as gr
def change_textbox(choice):
if choice == "short":
return gr.Textbox.update(lines=2, visible=True, interactive=False)
elif choice == "long":
return gr.Textbox.update(lines=8, visible=True)
else:
return gr.Textbox.update(visible=False)
with gr.Blocks() as demo:
radio = gr.Radio(
["short", "long", "none"], label="What kind of essay would you like to write?"
)
text = gr.Textbox(lines=2, interactive=True)
radio.change(fn=change_textbox, inputs=radio, outputs=text)
if __name__ == "__main__":
demo.launch()
```
I get the following error
```
Traceback (most recent call last):
File "/home/poli/miniconda3/envs/multimodal/lib/python3.9/site-packages/gradio/routes.py", line 289, in run_predict
output = await app.blocks.process_api(
File "/home/poli/miniconda3/envs/multimodal/lib/python3.9/site-packages/gradio/blocks.py", line 983, in process_api
data = self.postprocess_data(fn_index, result["prediction"], state)
File "/home/poli/miniconda3/envs/multimodal/lib/python3.9/site-packages/gradio/blocks.py", line 924, in postprocess_data
prediction_value = postprocess_update_dict(
File "/home/poli/miniconda3/envs/multimodal/lib/python3.9/site-packages/gradio/blocks.py", line 424, in postprocess_update_dict
prediction_value = block.get_specific_update(update_dict)
File "/home/poli/miniconda3/envs/multimodal/lib/python3.9/site-packages/gradio/blocks.py", line 265, in get_specific_update
generic_update = cls.update(**generic_update)
TypeError: update() got an unexpected keyword argument 'mode'
```
### Screenshot
_No response_
### Logs
```shell
-
```
### System Info
```shell
Gradio == 3.9.1
```
### Severity
serious, but I can work around it
| 2022-11-10T23:14:00 |
|
gradio-app/gradio | 2,640 | gradio-app__gradio-2640 | [
"2638"
] | a8f961ec7ea4dbb10f8303b9f48d28af83aacbf9 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -622,6 +622,13 @@ def iterate_over_children(children_list):
# add the event triggers
for dependency, fn in zip(config["dependencies"], fns):
+ # We used to add a "fake_event" to the config to cache examples
+ # without removing it. This was causing bugs in calling gr.Interface.load
+ # We fixed the issue by removing "fake_event" from the config in examples.py
+ # but we still need to skip these events when loading the config to support
+ # older demos
+ if dependency["trigger"] == "fake_event":
+ continue
targets = dependency.pop("targets")
trigger = dependency.pop("trigger")
dependency.pop("backend_fn")
diff --git a/gradio/examples.py b/gradio/examples.py
--- a/gradio/examples.py
+++ b/gradio/examples.py
@@ -296,6 +296,9 @@ async def cache(self) -> None:
if self.batch:
output = [value[0] for value in output]
cache_logger.flag(output)
+ # Remove the "fake_event" to prevent bugs in loading interfaces from spaces
+ Context.root_block.dependencies.remove(dependency)
+ Context.root_block.fns.pop(fn_index)
async def load_from_cache(self, example_id: int) -> List[Any]:
"""Loads a particular cached example for the interface.
| diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -179,6 +179,7 @@ async def test_caching_with_dict(self):
)
prediction = await io.examples_handler.load_from_cache(0)
assert prediction == [{"lines": 4, "__type__": "update"}, {"label": "lion"}]
+ assert not any(d["trigger"] == "fake_event" for d in io.config["dependencies"])
def test_raise_helpful_error_message_if_providing_partial_examples(self, tmp_path):
def foo(a, b):
diff --git a/test/test_external.py b/test/test_external.py
--- a/test/test_external.py
+++ b/test/test_external.py
@@ -274,6 +274,15 @@ def test_root_url(self):
]
)
+ def test_interface_with_examples(self):
+ # This demo has the "fake_event" correctly removed
+ demo = gr.Interface.load("spaces/freddyaboulton/calculator")
+ assert demo(2, "add", 3) == 5
+
+ # This demo still has the "fake_event". both should work
+ demo = gr.Interface.load("spaces/abidlabs/test-calculator-2")
+ assert demo(2, "add", 4) == 6
+
def test_get_tabular_examples_replaces_nan_with_str_nan():
readme = """
| `gr.Interface.load()` not working if the loaded Space has examples
### Describe the bug
It seems that `gr.Interface.load()` is not able to load a Space if the Space has examples
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```py
import gradio as gr
io = gr.Interface.load("spaces/abidlabs/test-calculator-2")
```
### Screenshot
_No response_
### Logs
```shell
KeyError Traceback (most recent call last)
<ipython-input-9-1a3d663f2b76> in <module>
----> 1 io = gr.Interface.load("spaces/abidlabs/test-calculator-2")
c:\users\islam\dev\gradio-repos\gradio\gradio\interface.py in load(cls, name, src, api_key, alias, **kwargs)
106 demo.launch()
107 """
--> 108 return super().load(name=name, src=src, api_key=api_key, alias=alias)
109
110 @classmethod
c:\users\islam\dev\gradio-repos\gradio\gradio\blocks.py in load(self_or_cls, fn, inputs, outputs, name, src, api_key, alias, _js, every, **kwargs)
1112 "Blocks.load() requires passing parameters as keyword arguments"
1113 )
-> 1114 return external.load_blocks_from_repo(name, src, api_key, alias, **kwargs)
1115 else:
1116 return self_or_cls.set_event_trigger(
c:\users\islam\dev\gradio-repos\gradio\gradio\external.py in load_blocks_from_repo(name, src, api_key, alias, **kwargs)
55 )
56
---> 57 blocks: gradio.Blocks = factory_methods[src](name, api_key, alias, **kwargs)
58 return blocks
59
c:\users\islam\dev\gradio-repos\gradio\gradio\external.py in from_spaces(space_name, api_key, alias, **kwargs)
346 )
347 else: # Create a Blocks for Gradio 3.x Spaces
--> 348 return from_spaces_blocks(config, api_key, iframe_url)
349
350
c:\users\islam\dev\gradio-repos\gradio\gradio\external.py in from_spaces_blocks(config, api_key, iframe_url)
397 else:
398 fns.append(None)
--> 399 return gradio.Blocks.from_config(config, fns, iframe_url)
400
401
c:\users\islam\dev\gradio-repos\gradio\gradio\blocks.py in from_config(cls, config, fns, root_url)
638
639 for target in targets:
--> 640 dependency = original_mapping[target].set_event_trigger(
641 event_name=trigger, fn=fn, **dependency
642 )
KeyError: 2
```
### System Info
```shell
3.9.1
```
### Severity
serious, but I can work around it
| I think the problem is related to the "fake event" used in example caching:
Just checked out the config of the upstream calculator space and id 2 corresponds to the fake event:
```
{"targets":[2],"trigger":"fake_event","inputs":[0,1,3],"outputs":[4],"backend_fn":true,"js":null,"queue":null,"api_name":null,"scroll_to_output":false,"show_progress":true,"every":null,"batch":false,"max_batch_size":4,"cancels":[]}
```
Can take a look at a fix | 2022-11-11T20:51:49 |
gradio-app/gradio | 2,690 | gradio-app__gradio-2690 | [
"2668"
] | 19462299f161c1e544a6b2f422dea0e761b8e721 | diff --git a/gradio/layouts.py b/gradio/layouts.py
--- a/gradio/layouts.py
+++ b/gradio/layouts.py
@@ -365,10 +365,12 @@ def get_config(self):
@staticmethod
def update(
open: Optional[bool] = None,
+ label: Optional[str] = None,
visible: Optional[bool] = None,
):
return {
"visible": visible,
+ "label": label,
"open": open,
"__type__": "update",
}
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -759,6 +759,36 @@ def test_with_generic_update(self):
"__type__": "update",
}
+ @pytest.mark.asyncio
+ async def test_accordion_update(self):
+ with gr.Blocks() as demo:
+ with gr.Accordion(label="Open for greeting", open=False) as accordion:
+ gr.Textbox("Hello!")
+ open_btn = gr.Button(label="Open Accordion")
+ close_btn = gr.Button(label="Close Accordion")
+ open_btn.click(
+ lambda: gr.Accordion.update(open=True, label="Open Accordion"),
+ inputs=None,
+ outputs=[accordion],
+ )
+ close_btn.click(
+ lambda: gr.Accordion.update(open=False, label="Closed Accordion"),
+ inputs=None,
+ outputs=[accordion],
+ )
+ result = await demo.process_api(fn_index=0, inputs=[None], request=None)
+ assert result["data"][0] == {
+ "open": True,
+ "label": "Open Accordion",
+ "__type__": "update",
+ }
+ result = await demo.process_api(fn_index=1, inputs=[None], request=None)
+ assert result["data"][0] == {
+ "open": False,
+ "label": "Closed Accordion",
+ "__type__": "update",
+ }
+
class TestRender:
def test_duplicate_error(self):
| Be able to update accordion label from backend
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
It would be nice if developers could update the accordion label from the backend.
Example use case:
You have a dropdown with dataset names on the left hand side and information about the dataset in different accordion components. You'd like the accordion title to reflect the currently selected dataset.
For example, in the UI below, it might be nice if instead of "Dataset Description" it read "<dataset-name> description" or "Label Distribution for <dataset-name>"

**Describe the solution you'd like**
```python
gr.Accordion.update(label=<new-label>)
```
**Additional context**
Add any other context or screenshots about the feature request here.
| Additionally, it would be great to also be able to update the state of the accordion (expanded or not) via the backend | 2022-11-21T21:41:27 |
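For illustration, a sketch of how this could look once supported, updating both the label and the open state (the dataset names and wiring are placeholders):
```python
import gradio as gr

with gr.Blocks() as demo:
    dataset = gr.Dropdown(["imdb", "sst2", "ag_news"], label="Dataset")
    with gr.Accordion("Dataset Description", open=False) as desc:
        gr.Markdown("Select a dataset to see its description.")
    dataset.change(
        lambda name: gr.Accordion.update(label=f"{name} description", open=True),
        dataset,
        desc,
    )

demo.launch()
```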
gradio-app/gradio | 2,736 | gradio-app__gradio-2736 | [
"2629"
] | 5ce13ab6cb795c6b27902709482ab4abe4e118fe | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -3038,6 +3038,7 @@ def __init__(
show_label: bool = True,
visible: bool = True,
elem_id: Optional[str] = None,
+ color: Optional[str] = None,
**kwargs,
):
"""
@@ -3048,8 +3049,10 @@ def __init__(
show_label: if True, will display label.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ color: The background color of the label (either a valid css color name or hexadecimal string).
"""
self.num_top_classes = num_top_classes
+ self.color = color
IOComponent.__init__(
self,
label=label,
@@ -3064,6 +3067,7 @@ def get_config(self):
return {
"num_top_classes": self.num_top_classes,
"value": self.value,
+ "color": self.color,
**IOComponent.get_config(self),
}
@@ -3105,12 +3109,24 @@ def update(
label: Optional[str] = None,
show_label: Optional[bool] = None,
visible: Optional[bool] = None,
+ color: Optional[str] = _Keywords.NO_VALUE,
):
+ # If color is not specified (NO_VALUE) map it to None so that
+ # it gets filtered out in postprocess. This will mean the color
+ # will not be updated in the front-end
+ if color is _Keywords.NO_VALUE:
+ color = None
+ # If the color was specified by the developer as None
+ # Map is so that the color is updated to be transparent,
+ # e.g. no background default state.
+ elif color is None:
+ color = "transparent"
updated_config = {
"label": label,
"show_label": show_label,
"visible": visible,
"value": value,
+ "color": color,
"__type__": "update",
}
return updated_config
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1352,8 +1352,31 @@ def test_component_functions(self):
"visible": True,
"interactive": None,
"root_url": None,
+ "color": None,
}
+ def test_color_argument(self):
+
+ label = gr.Label(value=-10, color="red")
+ assert label.get_config()["color"] == "red"
+ update_1 = gr.Label.update(value="bad", color="brown")
+ assert update_1["color"] == "brown"
+ update_2 = gr.Label.update(value="bad", color="#ff9966")
+ assert update_2["color"] == "#ff9966"
+
+ update_3 = gr.Label.update(
+ value={"bad": 0.9, "good": 0.09, "so-so": 0.01}, color="green"
+ )
+ assert update_3["color"] == "green"
+
+ update_4 = gr.Label.update(value={"bad": 0.8, "good": 0.18, "so-so": 0.02})
+ assert update_4["color"] is None
+
+ update_5 = gr.Label.update(
+ value={"bad": 0.8, "good": 0.18, "so-so": 0.02}, color=None
+ )
+ assert update_5["color"] == "transparent"
+
@pytest.mark.asyncio
async def test_in_interface(self):
"""
@@ -1590,7 +1613,9 @@ def bold_text(text):
class TestMarkdown:
def test_component_functions(self):
markdown_component = gr.Markdown("# Let's learn about $x$", label="Markdown")
- assert markdown_component.get_config()["value"].startswith("""<h1>Let\'s learn about <span class="math inline"><span style=\'font-size: 0px\'>x</span><svg xmlns:xlink="http://www.w3.org/1999/xlink" width="11.6pt" height="19.35625pt" viewBox="0 0 11.6 19.35625" xmlns="http://www.w3.org/2000/svg" version="1.1">\n \n <defs>\n <style type="text/css">*{stroke-linejoin: round; stroke-linecap: butt}</style>\n </defs>\n <g id="figure_1">\n <g id="patch_1">\n <path d="M 0 19.35625""")
+ assert markdown_component.get_config()["value"].startswith(
+ """<h1>Let\'s learn about <span class="math inline"><span style=\'font-size: 0px\'>x</span><svg xmlns:xlink="http://www.w3.org/1999/xlink" width="11.6pt" height="19.35625pt" viewBox="0 0 11.6 19.35625" xmlns="http://www.w3.org/2000/svg" version="1.1">\n \n <defs>\n <style type="text/css">*{stroke-linejoin: round; stroke-linecap: butt}</style>\n </defs>\n <g id="figure_1">\n <g id="patch_1">\n <path d="M 0 19.35625"""
+ )
@pytest.mark.asyncio
async def test_in_interface(self):
| Improvements to Label for Dashboarding
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As discussed today in our workshop, the gr.Number component does not work great for dashboarding uses where you want to display a number.
<img width="1344" alt="image" src="https://user-images.githubusercontent.com/41651716/200894524-8612c9d2-ddb5-41f9-95db-8bb8bed3f57b.png">
@abidlabs suggested using the Label component instead of a number and it works much better!
<img width="1336" alt="image" src="https://user-images.githubusercontent.com/41651716/200896004-47fbf4e7-a80f-4d31-afb3-57f39fa12c67.png">
However, there are some improvements we can make (detailed below).
**Describe the solution you'd like**
- Be able to set a color for the Label's Box. That way developers can use it to create "Alert" boxes for metrics.
Something like
```python
return gr.Label.update(value=100, color="#FF0000")
```
**Additional context**
Add any other context or screenshots about the feature request here.
| Very nice!
Maybe it would be cool if the box color parameter could accept a function based on the value. E.g. like this:
```py
def shade(val):
    if val==0:
        return "green"
    else:
        return "#FF0000"

gr.Label(box_color=shade)
```
That way as the label changes, the box color is automatically updated | 2022-11-28T22:07:40 |
gradio-app/gradio | 2,856 | gradio-app__gradio-2856 | [
"2821",
"2822"
] | dc200af89c24ec1e1d506e79f7d79bca4640df19 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -354,15 +354,17 @@ def __get__(self, instance, type_):
return descr_get(instance, type_)
+set_documentation_group("component-helpers")
+
+
@document()
def update(**kwargs) -> dict:
"""
- Updates component properties.
+ Updates component properties. When a function passed into a Gradio Interface or a Blocks events returns a typical value, it updates the value of the output component. But it is also possible to update the properties of an output component (such as the number of lines of a `Textbox` or the visibility of an `Image`) by returning the component's `update()` function, which takes as parameters any of the constructor parameters for that component.
This is a shorthand for using the update method on a component.
For example, rather than using gr.Number.update(...) you can just use gr.update(...).
Note that your editor's autocompletion will suggest proper parameters
if you use the update method on the component.
-
Demos: blocks_essay, blocks_update, blocks_essay_update
Parameters:
@@ -397,6 +399,9 @@ def change_textbox(choice):
return kwargs
+set_documentation_group("blocks")
+
+
def skip() -> dict:
return update()
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -537,7 +537,6 @@ class Request:
query parameters and other information about the request from within the prediction
function. The class is a thin wrapper around the fastapi.Request class. Attributes
of this class include: `headers`, `client`, `query_params`, and `path_params`,
-
Example:
import gradio as gr
def echo(name, request: gr.Request):
@@ -551,6 +550,8 @@ def __init__(self, request: fastapi.Request | None = None, **kwargs):
"""
Can be instantiated with either a fastapi.Request or by manually passing in
attributes (needed for websocket-based queueing).
+ Parameters:
+ request: A fastapi.Request
"""
self.request: fastapi.Request = request
self.kwargs: Dict = kwargs
| CSS glitch in docs
### Describe the bug
Weird formatting for one of the example buttons
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://gradio.app/docs/#request
### Screenshot
<img width="1074" alt="Screen Shot 2022-12-15 at 5 58 52 PM" src="https://user-images.githubusercontent.com/9021060/207878696-3d9fc101-b884-4dd6-9ccd-75836ac8cb35.png">
### Logs
```shell
N/A
```
### System Info
```shell
N/A
```
### Severity
annoying
Navigation issues with Component Helpers in docs
### Describe the bug
Navigation behaves weirdly with short sections
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://gradio.app/docs/#update
### Screenshot

### Logs
```shell
N/A
```
### System Info
```shell
N/A
```
### Severity
annoying
| 2022-12-20T10:49:31 |
||
gradio-app/gradio | 2,861 | gradio-app__gradio-2861 | [
"2186"
] | 612ab0539e87cd446afe7b2927f54de4151e0268 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -4834,6 +4834,7 @@ def __init__(
samples: List[List[Any]] = None,
headers: Optional[List[str]] = None,
type: str = "values",
+ samples_per_page: int = 10,
visible: bool = True,
elem_id: Optional[str] = None,
**kwargs,
@@ -4844,6 +4845,7 @@ def __init__(
samples: a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents an value for each component
headers: Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels
type: 'values' if clicking on a sample should pass the value of the sample, or "index" if it should pass the index of the sample
+ samples_per_page: how many examples to show per page.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
@@ -4861,6 +4863,7 @@ def __init__(
self.headers = []
else:
self.headers = [c.label or "" for c in self.components]
+ self.samples_per_page = samples_per_page
def get_config(self):
return {
@@ -4869,6 +4872,7 @@ def get_config(self):
"samples": self.samples,
"type": self.type,
"label": self.label,
+ "samples_per_page": self.samples_per_page,
**Component.get_config(self),
}
diff --git a/gradio/examples.py b/gradio/examples.py
--- a/gradio/examples.py
+++ b/gradio/examples.py
@@ -97,7 +97,7 @@ def __init__(
outputs: optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache` is True.
fn: optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache` is True.
cache_examples: if True, caches examples for fast runtime. If True, then `fn` and `outputs` need to be provided
- examples_per_page: how many examples to show per page (this parameter currently has no effect)
+ examples_per_page: how many examples to show per page.
label: the label to use for the examples component (by default, "Examples")
elem_id: an optional string that is assigned as the id of this component in the HTML DOM.
run_on_click: if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.
@@ -182,7 +182,6 @@ def __init__(
self.outputs = outputs
self.fn = fn
self.cache_examples = cache_examples
- self.examples_per_page = examples_per_page
self._api_mode = _api_mode
self.preprocess = preprocess
self.postprocess = postprocess
@@ -218,6 +217,7 @@ def __init__(
samples=non_none_examples,
type="index",
label=label,
+ samples_per_page=examples_per_page,
elem_id=elem_id,
)
| diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -45,6 +45,10 @@ def test_handle_directory_with_log_file(self):
for sample in examples.dataset.samples:
assert os.path.isabs(sample[0])
+ def test_examples_per_page(self):
+ examples = gr.Examples(["hello", "hi"], gr.Textbox(), examples_per_page=2)
+ assert examples.dataset.get_config()["samples_per_page"] == 2
+
@pytest.mark.asyncio
async def test_no_preprocessing(self):
with gr.Blocks():
diff --git a/test/test_interfaces.py b/test/test_interfaces.py
--- a/test/test_interfaces.py
+++ b/test/test_interfaces.py
@@ -132,10 +132,13 @@ def test_interface_browser(self, mock_browser):
def test_examples_list(self):
examples = ["test1", "test2"]
- interface = Interface(lambda x: x, "textbox", "label", examples=examples)
+ interface = Interface(
+ lambda x: x, "textbox", "label", examples=examples, examples_per_page=2
+ )
interface.launch(prevent_thread_lock=True)
assert len(interface.examples_handler.examples) == 2
assert len(interface.examples_handler.examples[0]) == 1
+ assert interface.examples_handler.dataset.get_config()["samples_per_page"] == 2
interface.close()
@mock.patch("IPython.display.display")
| Pagination for Examples
As a gradio user, I would like to provide many examples for my app without displaying all of them in the page when the app loads.
As a possible solution, we should expose an `overflow_row_behavior` for `Dataset`/`Examples` like we do for `Dataframe`.
This was originally asked in the Discussion sections:
> Most of the demo that use gr.Examples add sample statically. Is it currently possible to load/update samples dynamically in the case one have a large dataset so potentially need pagination ?
>
> _Originally posted by @jrabary in https://github.com/gradio-app/gradio/discussions/2163_
| Would it be possible to somehow achieve this using Blocks in the meantime? I'd like to implement a pagination feature but can't seem to figure out how to.
EDIT: I just saw your example in the linked discussion and tabs are what I was after! Sorry for the noise.
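For anyone landing here later, a minimal sketch of that tab-based workaround, assuming you are happy to split the examples into pages by hand (the page split below is illustrative):

```python
import gradio as gr

# manually split the examples into "pages", one gr.Tab per page
pages = [[[i] for i in range(start, start + 10)] for start in (0, 10)]

with gr.Blocks() as demo:
    inp = gr.Number(label="Input")
    out = gr.Number(label="Input * 2")
    calc = gr.Button("Calculate")
    with gr.Tabs():
        for page_num, page in enumerate(pages, start=1):
            with gr.Tab(f"Examples {page_num}"):
                gr.Examples(examples=page, inputs=[inp])
    calc.click(lambda x: x * 2, inp, out)

demo.launch()
```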
Awesome @cakiki !
Also FYI - if you have more than 10 examples gradio will paginate them for you as well
```python
import gradio as gr
with gr.Blocks() as demo:
current_example_values = gr.State([])
input_ = gr.Number(label="Input")
output = gr.Number(label="Input * 2")
calc = gr.Button(value="Calculate")
examples = gr.Examples(examples=[[i] for i in range(20)], inputs=[input_])
calc.click(lambda s: s * 2, input_, output)
demo.launch()
```
<img width="1317" alt="image" src="https://user-images.githubusercontent.com/41651716/207631487-83f11c65-dc4f-4e65-981e-801019ad2d8f.png">
There is an `examples_per_page` argument that lets you control how many examples are needed before pagination happens. But it's not being passed all the way through. Will fix that soon!
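Once that fix is in, a sketch like the following should be all that's needed to control the page size (the values here are arbitrary):

```python
import gradio as gr

with gr.Blocks() as demo:
    inp = gr.Number(label="Input")
    out = gr.Number(label="Input * 2")
    calc = gr.Button(value="Calculate")
    # examples_per_page controls how many rows are shown before pagination kicks in
    gr.Examples(examples=[[i] for i in range(20)], inputs=[inp], examples_per_page=5)
    calc.click(lambda s: s * 2, inp, out)

demo.launch()
```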
| 2022-12-20T17:45:33 |
gradio-app/gradio | 2,866 | gradio-app__gradio-2866 | [
"2562"
] | 612ab0539e87cd446afe7b2927f54de4151e0268 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -899,7 +899,7 @@ def __init__(
self,
choices: Optional[List[str]] = None,
*,
- value: List[str] | Callable = None,
+ value: List[str] | str | Callable = None,
type: str = "value",
label: Optional[str] = None,
every: float | None = None,
@@ -952,7 +952,7 @@ def get_config(self):
@staticmethod
def update(
- value: Optional[List[str]] = _Keywords.NO_VALUE,
+ value: Optional[List[str] | str] = _Keywords.NO_VALUE,
choices: Optional[List[str]] = None,
label: Optional[str] = None,
show_label: Optional[bool] = None,
@@ -991,15 +991,19 @@ def preprocess(self, x: List[str]) -> List[str] | List[int]:
+ ". Please choose from: 'value', 'index'."
)
- def postprocess(self, y: List[str] | None) -> List[str]:
+ def postprocess(self, y: List[str] | str | None) -> List[str]:
"""
Any postprocessing needed to be performed on function output.
Parameters:
- y: List of selected choices
+ y: List of selected choices. If a single choice is selected, it can be passed in as a string
Returns:
List of selected choices
"""
- return [] if y is None else y
+ if y is None:
+ return []
+ if not isinstance(y, list):
+ y = [y]
+ return y
def set_interpret_parameters(self):
"""
@@ -4834,6 +4838,7 @@ def __init__(
samples: List[List[Any]] = None,
headers: Optional[List[str]] = None,
type: str = "values",
+ samples_per_page: int = 10,
visible: bool = True,
elem_id: Optional[str] = None,
**kwargs,
@@ -4844,6 +4849,7 @@ def __init__(
samples: a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents an value for each component
headers: Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels
type: 'values' if clicking on a sample should pass the value of the sample, or "index" if it should pass the index of the sample
+ samples_per_page: how many examples to show per page.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
@@ -4861,6 +4867,7 @@ def __init__(
self.headers = []
else:
self.headers = [c.label or "" for c in self.components]
+ self.samples_per_page = samples_per_page
def get_config(self):
return {
@@ -4869,6 +4876,7 @@ def get_config(self):
"samples": self.samples,
"type": self.type,
"label": self.label,
+ "samples_per_page": self.samples_per_page,
**Component.get_config(self),
}
diff --git a/gradio/examples.py b/gradio/examples.py
--- a/gradio/examples.py
+++ b/gradio/examples.py
@@ -97,7 +97,7 @@ def __init__(
outputs: optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache` is True.
fn: optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache` is True.
cache_examples: if True, caches examples for fast runtime. If True, then `fn` and `outputs` need to be provided
- examples_per_page: how many examples to show per page (this parameter currently has no effect)
+ examples_per_page: how many examples to show per page.
label: the label to use for the examples component (by default, "Examples")
elem_id: an optional string that is assigned as the id of this component in the HTML DOM.
run_on_click: if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.
@@ -182,7 +182,6 @@ def __init__(
self.outputs = outputs
self.fn = fn
self.cache_examples = cache_examples
- self.examples_per_page = examples_per_page
self._api_mode = _api_mode
self.preprocess = preprocess
self.postprocess = postprocess
@@ -218,6 +217,7 @@ def __init__(
samples=non_none_examples,
type="index",
label=label,
+ samples_per_page=examples_per_page,
elem_id=elem_id,
)
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -493,6 +493,10 @@ def test_component_functions(self):
with pytest.raises(ValueError):
gr.CheckboxGroup(["a"], type="unknown")
+ cbox = gr.CheckboxGroup(choices=["a", "b"], value="c")
+ assert cbox.get_config()["value"] == ["c"]
+ assert cbox.postprocess("a") == ["a"]
+
def test_in_interface(self):
"""
Interface, process
diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -45,6 +45,10 @@ def test_handle_directory_with_log_file(self):
for sample in examples.dataset.samples:
assert os.path.isabs(sample[0])
+ def test_examples_per_page(self):
+ examples = gr.Examples(["hello", "hi"], gr.Textbox(), examples_per_page=2)
+ assert examples.dataset.get_config()["samples_per_page"] == 2
+
@pytest.mark.asyncio
async def test_no_preprocessing(self):
with gr.Blocks():
diff --git a/test/test_interfaces.py b/test/test_interfaces.py
--- a/test/test_interfaces.py
+++ b/test/test_interfaces.py
@@ -132,10 +132,13 @@ def test_interface_browser(self, mock_browser):
def test_examples_list(self):
examples = ["test1", "test2"]
- interface = Interface(lambda x: x, "textbox", "label", examples=examples)
+ interface = Interface(
+ lambda x: x, "textbox", "label", examples=examples, examples_per_page=2
+ )
interface.launch(prevent_thread_lock=True)
assert len(interface.examples_handler.examples) == 2
assert len(interface.examples_handler.examples[0]) == 1
+ assert interface.examples_handler.dataset.get_config()["samples_per_page"] == 2
interface.close()
@mock.patch("IPython.display.display")
| Setting value of CheckboxGroup to an integer causes an infinite loading loop.
### Describe the bug
Hello,
Setting `value` of a `CheckboxGroup` to an integer prevents the demo from loading.
That's all!
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Minimal code to reproduce
```
import gradio as gr

def dummy(values):
    return "OK"

demo = gr.Interface(
    dummy, [gr.CheckboxGroup(["checkbox 1", "checkbox 2", "checkbox 3"], value=0)], "text"
)

if __name__ == "__main__":
    demo.launch()
```
### Screenshot

### Logs
```shell
There is no crash, so I am ignoring this one.
```
### System Info
```shell
Gradio: `3.7`
Windows: Version 10.0.19044.2130
Happens on both Firefox and Chrome
```
### Severity
annoying
| Hi @taesiri !
Thanks for filing.
The problem is that `value` has to be a list of strings and each string has to be a valid possible choice. I think this should fix your problem:
```python
import gradio as gr

def dummy(values):
    return "OK"

demo = gr.Interface(
    dummy, [gr.CheckboxGroup(["checkbox 1", "checkbox 2", "checkbox 3"], value=["checkbox 1"])], "text"
)

if __name__ == "__main__":
    demo.launch()
```

The front-end crashes with
```
Uncaught (in promise) TypeError: t[0].includes is not a function
at Object.c (CheckboxGroup.svelte:40:20)
at Object.c (CheckboxGroup.svelte:32:4)
at Nt (index.f3976d0d.js:4:5230)
at Object.c (CheckboxGroup.svelte:35:22)
at Object.c (Block.svelte:44:8)
at Object.c (Block.svelte:44:8)
at Nt (index.f3976d0d.js:4:5230)
at Object.c (CheckboxGroup.svelte:24:58)
at Nt (index.f3976d0d.js:4:5230)
at Object.c (index.f3976d0d.js:34:30855)
```
But I think a more helpful error should be raised prior to the demo launching.
Thank you for your help @freddyaboulton
My expectation is that the demo should load fine without crashing or looping. Keep in mind that this will not happen if the `value` is set to a string, which is **not** among the options. | 2022-12-20T21:49:25 |
gradio-app/gradio | 2,868 | gradio-app__gradio-2868 | [
"2574"
] | 943b4ed77a098bf69519515c027b0c12ca8b2573 | diff --git a/demo/lineplot_component/run.py b/demo/lineplot_component/run.py
new file mode 100644
--- /dev/null
+++ b/demo/lineplot_component/run.py
@@ -0,0 +1,21 @@
+import gradio as gr
+from vega_datasets import data
+
+with gr.Blocks() as demo:
+ gr.LinePlot(
+ data.stocks(),
+ x="date",
+ y="price",
+ color="symbol",
+ color_legend_position="bottom",
+ title="Stock Prices",
+ tooltip=["date", "price", "symbol"],
+ height=300,
+ width=500,
+ show_label=False,
+ ).style(
+ container=False,
+ )
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/demo/live_dashboard/run.py b/demo/live_dashboard/run.py
--- a/demo/live_dashboard/run.py
+++ b/demo/live_dashboard/run.py
@@ -1,9 +1,10 @@
import math
+
+import pandas as pd
+
import gradio as gr
import datetime
-import plotly.express as px
import numpy as np
-import time
def get_time():
@@ -17,9 +18,18 @@ def get_plot(period=1):
global plot_end
x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)
y = np.sin(2 * math.pi * period * x)
- fig = px.line(x=x, y=y)
+ update = gr.LinePlot.update(
+ value=pd.DataFrame({"x": x, "y": y}),
+ x="x",
+ y="y",
+ title="Plot (updates every second)",
+ width=600,
+ height=350,
+ )
plot_end += 2 * math.pi
- return fig
+ if plot_end > 1000:
+ plot_end = 2 * math.pi
+ return update
with gr.Blocks() as demo:
@@ -33,7 +43,7 @@ def get_plot(period=1):
period = gr.Slider(
label="Period of plot", value=1, minimum=0, maximum=10, step=1
)
- plot = gr.Plot(label="Plot (updates every second)")
+ plot = gr.LinePlot(show_label=False)
with gr.Column():
name = gr.Textbox(label="Enter your name")
greeting = gr.Textbox(label="Greeting")
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -4081,6 +4081,7 @@ class ScatterPlot(Plot):
Postprocessing: expects a pandas dataframe with the data to plot.
Demos: native_plots
+ Guides: creating_a_dashboard_from_bigquery_data
"""
def __init__(
@@ -4420,7 +4421,7 @@ class LinePlot(Plot):
Preprocessing: this component does *not* accept input.
Postprocessing: expects a pandas dataframe with the data to plot.
- Demos: native_plots
+ Demos: native_plots, live_dashboard
"""
def __init__(
| Plotly plot too zoomed in
### Describe the bug
If you run the live_dashboard demo, the sine curve plot is too zoomed in.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Run the `live_dashboard` demo in the repo
### Screenshot

### Logs
```shell
-
```
### System Info
```shell
main
```
### Severity
annoying
| 2022-12-20T23:00:24 |
||
gradio-app/gradio | 2,890 | gradio-app__gradio-2890 | [
"2879"
] | e3434dd4cb5f86bd54ed9dbdae010bcfffd146cc | diff --git a/demo/blocks_style/run.py b/demo/blocks_style/run.py
--- a/demo/blocks_style/run.py
+++ b/demo/blocks_style/run.py
@@ -1,103 +1,57 @@
import gradio as gr
with gr.Blocks(title="Styling Examples") as demo:
- with gr.Column():
- txt = gr.Textbox(label="Small Textbox", lines=1).style(
- rounded=False,
- border=False,
- container=False,
- )
-
- num = gr.Number(label="Number", show_label=False).style(
- rounded=False,
- border=False,
- container=False,
- )
- slider = gr.Slider(label="Slider", show_label=False).style(
- container=False,
- )
- check = gr.Checkbox(label="Checkbox", show_label=False).style(
- rounded=False,
- border=False,
- container=False,
- )
+ with gr.Column(variant="box"):
+ txt = gr.Textbox(label="Small Textbox", lines=1)
+ num = gr.Number(label="Number", show_label=False)
+ slider = gr.Slider(label="Slider", show_label=False)
+ check = gr.Checkbox(label="Checkbox", show_label=False)
check_g = gr.CheckboxGroup(
- label="Checkbox Group", choices=["One", "Two", "Three"], show_label=False
- ).style(rounded=False, container=False, item_container=False)
+ label="Checkbox Group",
+ choices=["One", "Two", "Three"],
+ show_label=False,
+ )
radio = gr.Radio(
label="Radio", choices=["One", "Two", "Three"], show_label=False
).style(
item_container=False,
- container=False,
)
drop = gr.Dropdown(
label="Dropdown", choices=["One", "Two", "Three"], show_label=False
- ).style(
- rounded=False,
- border=False,
- container=False,
- )
- image = gr.Image(show_label=False).style(
- rounded=False,
- )
- video = gr.Video(show_label=False).style(
- rounded=False,
- )
- audio = gr.Audio(show_label=False).style(
- rounded=False,
- )
- file = gr.File(show_label=False).style(
- rounded=False,
- )
- df = gr.Dataframe(show_label=False).style(
- rounded=False,
- )
-
- ts = gr.Timeseries(show_label=False).style(
- rounded=False,
)
+ image = gr.Image(show_label=False)
+ video = gr.Video(show_label=False)
+ audio = gr.Audio(show_label=False)
+ file = gr.File(show_label=False)
+ df = gr.Dataframe(show_label=False)
+ ts = gr.Timeseries(show_label=False)
label = gr.Label().style(
container=False,
)
highlight = gr.HighlightedText(
- "+ hello. - goodbye", show_label=False, color_map={"+": "green", "-": "red"}
- ).style(rounded=False, container=False)
+ "+ hello. - goodbye",
+ show_label=False,
+ ).style(color_map={"+": "green", "-": "red"}, container=False)
json = gr.JSON().style(container=False)
- html = gr.HTML(show_label=False).style()
+ html = gr.HTML(show_label=False)
gallery = gr.Gallery().style(
- rounded=False,
grid=(3, 3, 1),
height="auto",
container=False,
)
- chat = gr.Chatbot("hi", color_map=("pink", "blue")).style(
- rounded=False,
- )
+ chat = gr.Chatbot([("hi", "good bye")]).style(color_map=("pink", "blue"))
- model = gr.Model3D().style(
- rounded=False,
- )
+ model = gr.Model3D()
- gr.Plot().style()
- md = gr.Markdown(show_label=False).style()
+ md = gr.Markdown(show_label=False)
- highlight = gr.HighlightedText().style(
- rounded=False,
- )
+ highlight = gr.HighlightedText()
btn = gr.Button("Run").style(
- rounded=False,
full_width=True,
- border=False,
)
- # Not currently public
- # TODO: Uncomment at next release
- # gr.Dataset().style(
- # rounded=False,
- # margin=False,
- # border=False,
- # )
+ gr.Dataset(components=[txt, num])
if __name__ == "__main__":
| Can't run blocks_style demo
### Describe the bug
Run `python demo/blocks_style/run.py` and you will get an error
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Above
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "demo/blocks_style/run.py", line 88, in <module>
btn = gr.Button("Run").style(
File "/Users/freddy/sources/gradio/gradio/blocks.py", line 1090, in __exit__
self.config = self.get_config_file()
File "/Users/freddy/sources/gradio/gradio/blocks.py", line 1066, in get_config_file
"props": utils.delete_none(block.get_config())
File "/Users/freddy/sources/gradio/gradio/components.py", line 3787, in get_config
"value": self.value,
AttributeError: 'Chatbot' object has no attribute 'value'
```
### System Info
```shell
Main
```
### Severity
annoying
| 2022-12-27T15:51:55 |
||
gradio-app/gradio | 2,915 | gradio-app__gradio-2915 | [
"581",
"586"
] | db54b7b76a58aa331f64d0d55766dd9ef712e182 | diff --git a/website/homepage/src/docs/__init__.py b/website/homepage/src/docs/__init__.py
--- a/website/homepage/src/docs/__init__.py
+++ b/website/homepage/src/docs/__init__.py
@@ -148,6 +148,7 @@ def build(output_dir, jinja_env, gradio_wheel_url, gradio_version):
version="main",
gradio_version=gradio_version,
gradio_wheel_url=gradio_wheel_url,
+ canonical_suffix="/main"
)
output_folder = os.path.join(output_dir, "docs")
os.makedirs(output_folder)
@@ -167,7 +168,7 @@ def build_pip_template(version, jinja_env):
docs_files = os.listdir("src/docs")
template = jinja_env.get_template("docs/template.html")
output = template.render(
- docs=docs, find_cls=find_cls, version="pip", gradio_version=version, ordered_events=ordered_events
+ docs=docs, find_cls=find_cls, version="pip", gradio_version=version, canonical_suffix="", ordered_events=ordered_events
)
with open(f"src/docs/v{version}_template.html", "w+") as template_file:
template_file.write(output)
diff --git a/website/homepage/src/guides/__init__.py b/website/homepage/src/guides/__init__.py
--- a/website/homepage/src/guides/__init__.py
+++ b/website/homepage/src/guides/__init__.py
@@ -32,7 +32,7 @@ def format_name(guide_name):
guide_name = guide_name[guide_name.index("_") + 1 :]
if guide_name.lower().endswith(".md"):
guide_name = guide_name[:-3]
- pretty_guide_name = " ".join([word[0].upper() + word[1:] for word in guide_name.split("_")])
+ pretty_guide_name = " ".join([word[0].upper() + word[1:] for word in guide_name.split("-")])
return index, guide_name, pretty_guide_name
| Replace underscores with dash in slugs (`_` -> `-`).
Review and edit guides to be easily scannable + parsable by readers (and google)
We need to ensure users can quickly find and consume the content they are interested in and to ensure that robots like it too. This means ensuring the purpose of the content is clear, ensuring that the content is concise, and easy to digest.
- [ ] Add tldr style content at the top
- [ ] Ensure paragraphs are of an optimal length and that guides aren't too long, longer guides could be split into parts.
- [ ] Should we add some kind of CTAs in the content, follow on social media, contribute to the library, etc?
There are many articles like this, but Buffer talks about content and paragraph length later on. Buffer is a tool for publishing/managing social media channels, so they should be relatively authoritative and the content seems to make sense. https://buffer.com/library/optimal-length-social-media/
| 2023-01-01T19:03:21 |
||
gradio-app/gradio | 2,921 | gradio-app__gradio-2921 | [
"1923"
] | 58b1a074ba342fe01445290d680a70c9304a9de1 | diff --git a/website/homepage/src/docs/__init__.py b/website/homepage/src/docs/__init__.py
--- a/website/homepage/src/docs/__init__.py
+++ b/website/homepage/src/docs/__init__.py
@@ -53,19 +53,20 @@ def add_demos():
add_demos()
+ordered_events = ["Change()", "Click()", "Submit()", "Edit()", "Clear()", "Play()", "Pause()", "Stream()", "Blur()", "Upload()"]
def add_supported_events():
for component in docs["component"]:
- component["events"] = []
+ component["events-list"] = []
event_listener_props = dir(EventListener)
for listener in EventListener.__subclasses__():
if not issubclass(component["class"], listener):
continue
for prop in dir(listener):
if prop not in event_listener_props:
- component["events"].append(prop + "()")
- if component["events"]:
- component["events"] = ", ".join(component["events"])
+ component["events-list"].append(prop + "()")
+ if component["events-list"]:
+ component["events"] = ", ".join(component["events-list"])
add_supported_events()
@@ -142,6 +143,7 @@ def build(output_dir, jinja_env, gradio_wheel_url, gradio_version):
template = jinja_env.get_template("docs/template.html")
output = template.render(
docs=docs,
+ ordered_events=ordered_events,
find_cls=find_cls,
version="main",
gradio_version=gradio_version,
@@ -165,7 +167,7 @@ def build_pip_template(version, jinja_env):
docs_files = os.listdir("src/docs")
template = jinja_env.get_template("docs/template.html")
output = template.render(
- docs=docs, find_cls=find_cls, version="pip", gradio_version=version
+ docs=docs, find_cls=find_cls, version="pip", gradio_version=version, ordered_events=ordered_events
)
with open(f"src/docs/v{version}_template.html", "w+") as template_file:
template_file.write(output)
| Create a matrix that shows which events are valid for each component
In the discussion of #1892, @FarukOzderim and @pngwn think it would be a good idea to augment one of our guides with a matrix that shows the valid component/event combinations.
Would be cool to create this matrix as a gradio demo.
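One possible sketch of such a demo, probing which event methods each component exposes at runtime; the component and event lists below are illustrative subsets, and the introspection approach is just one option:

```python
import gradio as gr
import pandas as pd

# illustrative subsets; a real matrix would enumerate every component and event
events = ["change", "click", "submit", "edit", "clear", "play", "pause", "stream", "blur", "upload"]
components = [gr.Textbox, gr.Number, gr.Button, gr.Image, gr.Audio, gr.Video, gr.Checkbox]

rows = []
for cls in components:
    comp = cls()  # instantiate once so we can check which event methods exist
    rows.append([cls.__name__] + ["✓" if hasattr(comp, event) else "" for event in events])

matrix = pd.DataFrame(rows, columns=["Component"] + events)

with gr.Blocks() as demo:
    gr.Dataframe(value=matrix, interactive=False)

demo.launch()
```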
| 2023-01-03T13:43:51 |
||
gradio-app/gradio | 2,939 | gradio-app__gradio-2939 | [
"2937"
] | d229654673a74e5e818d57706379bf83c1ce6305 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -513,6 +513,7 @@ def __init__(
self.output_components = None
self.__name__ = None
self.api_mode = None
+ self.progress_tracking = None
if self.analytics_enabled:
self.ip_address = utils.get_local_ip_address()
@@ -1096,6 +1097,10 @@ def __exit__(self, *args):
self.parent.children.extend(self.children)
self.config = self.get_config_file()
self.app = routes.App.create_app(self)
+ self.progress_tracking = any(
+ block_fn.fn is not None and special_args(block_fn.fn)[1] is not None
+ for block_fn in self.fns
+ )
@class_or_instancemethod
def load(
@@ -1313,10 +1318,6 @@ def reverse(text):
self.height = height
self.width = width
self.favicon_path = favicon_path
- self.progress_tracking = any(
- block_fn.fn is not None and special_args(block_fn.fn)[1] is not None
- for block_fn in self.fns
- )
if enable_queue is not None:
self.enable_queue = enable_queue
| diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -203,6 +203,24 @@ def test_get_file_created_by_app(self):
file_response = client.get(f"/file={created_file}")
assert file_response.is_success
+ def test_mount_gradio_app(self):
+ app = FastAPI()
+
+ demo = gr.Interface(
+ lambda s: f"Hello from ps, {s}!", "textbox", "textbox"
+ ).queue()
+ demo1 = gr.Interface(
+ lambda s: f"Hello from py, {s}!", "textbox", "textbox"
+ ).queue()
+
+ app = gr.mount_gradio_app(app, demo, path="/ps")
+ app = gr.mount_gradio_app(app, demo1, path="/py")
+
+ # Use context manager to trigger start up events
+ with TestClient(app) as client:
+ assert client.get("/ps").is_success
+ assert client.get("/py").is_success
+
class TestGeneratorRoutes:
def test_generator(self):
| AttributeError: 'Blocks' object has no attribute 'progress_tracking'
### Describe the bug
```python
from fastapi import FastAPI
import gradio as gr
app = FastAPI()
from ps import demo
from py import demo1
app = gr.mount_gradio_app(app, demo, path="/ps")
app = gr.mount_gradio_app(app, demo1, path="/py")
```

```
File "D:\ProgramData\Anaconda3\lib\site-packages\starlette\routing.py", line 671, in lifespan
    async with self.lifespan_context(app):
File "D:\ProgramData\Anaconda3\lib\site-packages\starlette\routing.py", line 566, in __aenter__
    await self._router.startup()
File "D:\ProgramData\Anaconda3\lib\site-packages\starlette\routing.py", line 648, in startup
    await handler()
File "D:\ProgramData\Anaconda3\lib\site-packages\gradio\routes.py", line 621, in start_queue
    gradio_app.get_blocks().startup_events()
File "D:\ProgramData\Anaconda3\lib\site-packages\gradio\blocks.py", line 1667, in startup_events
    utils.run_coro_in_background(self._queue.start, (self.progress_tracking,))
AttributeError: 'Blocks' object has no attribute 'progress_tracking'
```
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
3.15 ok 3.16 bug
### Screenshot
on
### Logs
```shell
on
```
### System Info
```shell
3.16
```
### Severity
annoying
| Thanks for letting us know @wuyuliyu !! | 2023-01-05T18:40:04 |
gradio-app/gradio | 2,993 | gradio-app__gradio-2993 | [
"2960"
] | 5c32ba31ba8e620f2285ed6d10f704775b3e3531 | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -706,8 +706,7 @@ def attach_interpretation_events(
interpretation_btn.click(
self.interpret_func,
inputs=self.input_components + self.output_components,
- outputs=interpretation_set
- or [] + [input_component_column, interpret_component_column], # type: ignore
+ outputs=(interpretation_set or []) + [input_component_column, interpret_component_column], # type: ignore
preprocess=False,
)
| diff --git a/test/test_interfaces.py b/test/test_interfaces.py
--- a/test/test_interfaces.py
+++ b/test/test_interfaces.py
@@ -238,6 +238,34 @@ def quadratic(num1: float, num2: float) -> float:
interpretation="default",
)
+ interpretation_id = None
+ for c in iface.config["components"]:
+ if c["props"].get("value") == "Interpret" and c.get("type") == "button":
+ interpretation_id = c["id"]
+
+ # Make sure the event is configured correctly.
+ interpretation_dep = next(
+ d
+ for d in iface.config["dependencies"]
+ if d["targets"] == [interpretation_id]
+ )
+ interpretation_comps = [
+ c["id"]
+ for c in iface.config["components"]
+ if c.get("type") == "interpretation"
+ ]
+ interpretation_columns = [
+ c["id"]
+ for c in iface.config["components"]
+ if c.get("type") == "column" and c["props"].get("variant") == "default"
+ ]
+ assert sorted(interpretation_dep["outputs"]) == sorted(
+ interpretation_comps + interpretation_columns
+ )
+ assert sorted(interpretation_dep["inputs"]) == sorted(
+ [c._id for c in iface.input_components + iface.output_components]
+ )
+
app, _, _ = iface.launch(prevent_thread_lock=True)
client = TestClient(app)
| Interpretation is not updating the input text visual
### Describe the bug
The interpretation in 3.16.1 is not updating the input text visual. The function hook is called, but there are no updates. There is also a bug related to examples: some extra text is appended in the text field after clicking on an example.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://gradio.app/demos/ under "Sentiment Analysis"
### Screenshot

### Logs
```shell
No logs
```
### System Info
```shell
3.16.1
```
### Severity
blocking upgrade to latest gradio version
| Hmm thanks for flagging the issue @yaogee, we'll take a look! | 2023-01-16T14:43:09 |
gradio-app/gradio | 2,999 | gradio-app__gradio-2999 | [
"2996"
] | 43f0bedf5259b0f5f95b61f99d72028bb0b922a0 | diff --git a/gradio/serializing.py b/gradio/serializing.py
--- a/gradio/serializing.py
+++ b/gradio/serializing.py
@@ -119,7 +119,7 @@ def serialize(
"""
if x is None or x == "":
return None
- filename = Path(load_dir) / x
+ filename = str(Path(load_dir) / x)
return {
"name": filename,
"data": processing_utils.encode_url_or_file_to_base64(
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -796,7 +796,6 @@ def test_component_functions(self):
assert isinstance(audio_input.preprocess(x_wav), str)
with pytest.raises(ValueError):
gr.Audio(type="unknown")
- audio_input = gr.Audio(type="numpy")
# Output functionalities
y_audio = gr.processing_utils.decode_base64_to_file(
@@ -829,6 +828,15 @@ def test_component_functions(self):
output2 = audio_output.postprocess(y_audio.name)
assert output1 == output2
+ def test_serialize(self):
+ audio_input = gr.Audio()
+ assert audio_input.serialize("test/test_files/audio_sample.wav") == {
+ "data": media_data.BASE64_AUDIO["data"],
+ "is_file": False,
+ "orig_name": "audio_sample.wav",
+ "name": "test/test_files/audio_sample.wav",
+ }
+
def test_tokenize(self):
"""
Tokenize, get_masked_inputs
| Error with gradio 3.16.x by calling a function from a loaded space
### Describe the bug
Hello! I think there is a bug when calling a function from a loaded Space while using the 3.16.x version of gradio.
No problem with the 3.15.0 version.
See below the logs from my "Whisper to Stable Diffusion" HF Space, calling spaces/sanchit-gandhi/whisper-large-v2 (whisper):
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
To reproduce the bug, you can duplicate this Space: https://huggingface.co/spaces/fffiloni/whisper-to-stable-diffusion
and then change the gradio version from 3.15.0 to 3.16.x up to .2
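For reference, the calling pattern that triggers the error looks roughly like this (reconstructed from the traceback below; the exact inputs depend on the upstream Space's signature):

```python
import gradio as gr

# load the remote Space as a callable object
whisper = gr.Interface.load("spaces/sanchit-gandhi/whisper-large-v2")

def transcribe(audio):
    # on 3.16.x this call fails with "Object of type PosixPath is not JSON serializable"
    return whisper(audio, None, "transcribe", api_name="predict")
```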
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/gradio/routes.py", line 337, in run_predict
output = await app.get_blocks().process_api(
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 1015, in process_api
result = await self.call_function(
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 833, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.local/lib/python3.8/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "app.py", line 85, in translate_better
transcribe_text_result = whisper(audio, None, "transcribe", api_name="predict")
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 760, in __call__
outputs = utils.synchronize_async(
File "/home/user/.local/lib/python3.8/site-packages/gradio/utils.py", line 377, in synchronize_async
return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/fsspec/asyn.py", line 98, in sync
raise return_result
File "/home/user/.local/lib/python3.8/site-packages/fsspec/asyn.py", line 53, in _runner
result[0] = await coro
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 1015, in process_api
result = await self.call_function(
File "/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py", line 833, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/user/.local/lib/python3.8/site-packages/anyio/to_thread.py", line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 937, in run_sync_in_worker_thread
return await future
File "/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py", line 867, in run
result = context.run(func, *args)
File "/home/user/.local/lib/python3.8/site-packages/gradio/external.py", line 383, in fn
data = json.dumps({"data": data, "fn_index": fn_index})
File "/usr/local/lib/python3.8/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/local/lib/python3.8/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.8/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.8/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type PosixPath is not JSON serializable
```
### System Info
Huggingface Hub
Gradio version: 3.16.x
### Severity
annoying
| Thanks @fffiloni I think I know what the problem is! | 2023-01-17T13:43:35 |