repo (string, 856 classes) | pull_number (int64, 3 to 127k) | instance_id (string, length 12 to 58) | issue_numbers (sequence, length 1 to 5) | base_commit (string, length 40) | patch (string, length 67 to 1.54M) | test_patch (string, length 0 to 107M) | problem_statement (string, length 3 to 307k) | hints_text (string, length 0 to 908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
gradio-app/gradio | 3,011 | gradio-app__gradio-3011 | [
"2982"
] | f7f5398e4c57ec174466fa5160c21bcfb8d83fe6 | diff --git a/gradio/external.py b/gradio/external.py
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -63,6 +63,32 @@ def load_blocks_from_repo(
return blocks
+def chatbot_preprocess(text, state):
+ payload = {
+ "inputs": {"generated_responses": None, "past_user_inputs": None, "text": text}
+ }
+ if state is not None:
+ payload["inputs"]["generated_responses"] = state["conversation"][
+ "generated_responses"
+ ]
+ payload["inputs"]["past_user_inputs"] = state["conversation"][
+ "past_user_inputs"
+ ]
+
+ return payload
+
+
+def chatbot_postprocess(response):
+ response_json = response.json()
+ chatbot_value = list(
+ zip(
+ response_json["conversation"]["past_user_inputs"],
+ response_json["conversation"]["generated_responses"],
+ )
+ )
+ return chatbot_value, response_json
+
+
def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs):
model_url = "https://huggingface.co/{}".format(model_name)
api_url = "https://api-inference.huggingface.co/models/{}".format(model_name)
@@ -76,7 +102,6 @@ def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs
response.status_code == 200
), f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `api_key` parameter."
p = response.json().get("pipeline_tag")
-
pipelines = {
"audio-classification": {
# example model: ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition
@@ -101,6 +126,12 @@ def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs
"preprocess": to_binary,
"postprocess": lambda r: r.json()["text"],
},
+ "conversational": {
+ "inputs": [components.Textbox(), components.State()], # type: ignore
+ "outputs": [components.Chatbot(), components.State()], # type: ignore
+ "preprocess": chatbot_preprocess,
+ "postprocess": chatbot_postprocess,
+ },
"feature-extraction": {
# example model: julien-c/distilbert-feature-extraction
"inputs": components.Textbox(label="Input"),
@@ -125,6 +156,12 @@ def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs
{i["label"].split(", ")[0]: i["score"] for i in r.json()}
),
},
+ "image-to-text": {
+ "inputs": components.Image(type="filepath", label="Input Image"),
+ "outputs": components.Textbox(),
+ "preprocess": to_binary,
+ "postprocess": lambda r: r.json()[0]["generated_text"],
+ },
"question-answering": {
# Example: deepset/xlm-roberta-base-squad2
"inputs": [
@@ -311,7 +348,12 @@ def query_huggingface_api(*params):
}
kwargs = dict(interface_info, **kwargs)
- kwargs["_api_mode"] = True # So interface doesn't run pre/postprocess.
+
+ # So interface doesn't run pre/postprocess
+ # except for conversational interfaces which
+ # are stateful
+ kwargs["_api_mode"] = p != "conversational"
+
interface = gradio.Interface(**kwargs)
return interface
| diff --git a/test/test_external.py b/test/test_external.py
--- a/test/test_external.py
+++ b/test/test_external.py
@@ -228,6 +228,28 @@ def test_numerical_to_label_space(self):
except TooManyRequestsError:
pass
+ def test_image_to_text(self):
+ io = gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning")
+ try:
+ output = io("gradio/test_data/lion.jpg")
+ assert isinstance(output, str)
+ except TooManyRequestsError:
+ pass
+
+ def test_conversational(self):
+ io = gr.Interface.load("models/microsoft/DialoGPT-medium")
+ app, _, _ = io.launch(prevent_thread_lock=True)
+ client = TestClient(app)
+ assert app.state_holder == {}
+ response = client.post(
+ "/api/predict/",
+ json={"session_hash": "foo", "data": ["Hi!", None], "fn_index": 0},
+ )
+ output = response.json()
+ assert isinstance(output["data"], list)
+ assert isinstance(output["data"][0], list)
+ assert isinstance(app.state_holder["foo"], dict)
+
def test_speech_recognition_model(self):
io = gr.Interface.load("models/facebook/wav2vec2-base-960h")
try:
| Add conversational pipeline to Interface.load
- [X] I have searched to see if a similar issue already exists.
When trying to use the Hugging Face hosted chatbot models, `Interface.load` fails with the error:
`ValueError: Unsupported pipeline type: conversational`
The models hosted on Hugging Face are really useful for testing out models without having to download them. They also make it possible to use large models while developing on a laptop.
So it would be very useful to be able to use conversational models via the HTTP load functionality.
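For illustration, a minimal usage sketch of the requested feature, assuming the patch above is applied; the model name is the one used in the new test in `test_external.py`:
```python
import gradio as gr

# Load a conversational model through the Hugging Face Inference API.
# With the patch, the loaded interface pairs Textbox/State inputs with
# Chatbot/State outputs, matching the new "conversational" pipeline entry.
demo = gr.Interface.load("models/microsoft/DialoGPT-medium")
demo.launch()
```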
| 2023-01-18T17:52:59 |
|
gradio-app/gradio | 3,048 | gradio-app__gradio-3048 | [
"3043"
] | c85572192e28f85d4d75fda9c6b3431c2ecf352d | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2250,6 +2250,10 @@ def __init__(
"""
self.file_count = file_count
self.file_types = file_types
+ if file_types is not None and not isinstance(file_types, list):
+ raise ValueError(
+ f"Parameter file_types must be a list. Received {file_types.__class__.__name__}"
+ )
valid_types = [
"file",
"binary",
@@ -2982,6 +2986,10 @@ def __init__(
"""
self.type = type
self.file_count = file_count
+ if file_types is not None and not isinstance(file_types, list):
+ raise ValueError(
+ f"Parameter file_types must be a list. Received {file_types.__class__.__name__}"
+ )
self.file_types = file_types
self.label = label
TempFileManager.__init__(self)
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -942,6 +942,12 @@ def test_component_functions(self):
output2 = file_input.postprocess("test/test_files/sample_file.pdf")
assert output1 == output2
+ def test_file_type_must_be_list(self):
+ with pytest.raises(
+ ValueError, match="Parameter file_types must be a list. Received str"
+ ):
+ gr.File(file_types=".json")
+
def test_in_interface_as_input(self):
"""
Interface, process
@@ -983,6 +989,12 @@ def test_component_functions(self):
input2 = upload_input.preprocess(x_file)
assert input1.name == input2.name
+ def test_raises_if_file_types_is_not_list(self):
+ with pytest.raises(
+ ValueError, match="Parameter file_types must be a list. Received int"
+ ):
+ gr.UploadButton(file_types=2)
+
class TestDataframe:
def test_component_functions(self):
| `gr.File` with `file_types="image"` breaks front-end rendering
### Describe the bug
If a `gr.File` is created with `file_types="image"`, the front-end never renders due to an `Uncaught (in promise) TypeError: i.map is not a function` in the browser console

### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```py
import gradio as gr
with gr.Blocks() as demo:
gr.File(label="Test", file_count="multiple", interactive=True, file_types="image")
demo.launch()
```
https://huggingface.co/spaces/multimodalart/grFileBug
### Screenshot
_No response_
### Logs
```shell
FileUpload.svelte:49 Uncaught (in promise) TypeError: i.map is not a function
at Ce (FileUpload.svelte:49:27)
at Pt (index.4395ab38.js:4:6005)
at new Ee (FileUpload.svelte:60:30)
at Array.Me (File.svelte:57:16)
at Array.Se (File.svelte:40:16)
at jn (index.4395ab38.js:1:1425)
at _n (index.4395ab38.js:76:13914)
at Wc (index.4395ab38.js:76:14690)
at Pt (index.4395ab38.js:4:6206)
at new Zc (index.4395ab38.js:76:16107)
```
### System Info
```shell
gradio==3.16.2
```
### Severity
blocking upgrade to latest gradio version
| PS: Works perfectly in Gradio 3.15, but breaks from `3.16.0` to `3.16.2`
@apolinario Can you try with
`gr.File(label="Test", file_count="multiple", interactive=True, file_types=["image"]) `
Works for me on main right now
<img width="1301" alt="image" src="https://user-images.githubusercontent.com/41651716/214081132-b311bef4-5183-4cbe-bc51-40ed94c9b2b8.png">
That works! Somehow I had old code that did not use a list and it used to work - so that broke on 3.16.x - but as a list it did work!
Glad it works! Will put up a PR to check that `file_types` is a list and raise an error to prevent front-end crashing | 2023-01-24T14:17:28 |
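For reference, a short sketch of the behavior after this patch; the component arguments mirror the reproduction and the hint above:
```python
import gradio as gr

# A bare string now fails fast instead of silently breaking the front-end:
#   gr.File(file_types=".json")  ->  ValueError: Parameter file_types must be a list. Received str
# The supported form wraps the accepted types in a list:
file_input = gr.File(
    label="Test", file_count="multiple", interactive=True, file_types=["image"]
)
```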
gradio-app/gradio | 3,049 | gradio-app__gradio-3049 | [
"2472"
] | 9599772fd6ceb822846a7096ea55912d2b513e6a | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -184,8 +184,14 @@ def login(form_data: OAuth2PasswordRequestForm = Depends()):
) or (callable(app.auth) and app.auth.__call__(username, password)):
token = secrets.token_urlsafe(16)
app.tokens[token] = username
- response = RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
- response.set_cookie(key="access-token", value=token, httponly=True)
+ response = JSONResponse(content={"success": True})
+ response.set_cookie(
+ key="access-token",
+ value=token,
+ httponly=True,
+ samesite="none",
+ secure=True,
+ )
return response
else:
raise HTTPException(status_code=400, detail="Incorrect credentials.")
@@ -206,6 +212,7 @@ def main(request: fastapi.Request, user: str = Depends(get_current_user)):
config = {
"auth_required": True,
"auth_message": blocks.auth_message,
+ "is_space": app.get_blocks().is_space,
}
try:
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -1338,7 +1338,7 @@ async def say_hello(name):
data={"username": "abc", "password": "123"},
follow_redirects=False,
)
- assert resp.status_code == 302
+ assert resp.status_code == 200
token = resp.cookies.get("access-token")
assert token
| Authentication doesnt work if the app is embedded in a frame
### Describe the bug
Pretty self-explanatory: everything works fine when I embed the gradio app in a div element, except the login, which fails without any error message in the browser console.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Any gradio app that has authentication and is embedded as follows:
```
<html>
<head>
<meta name="keywords" content="" />
<meta name="description" content="" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
</head>
<frameset rows="100%">
<frame src="http://18.195.21.198:7860/" title="mytitle" frameborder="0" noresize="noresize"/>
<noframes>
<body>
<p><a href="http://mygradiohosting:7860">http://mywebsite.com</a> </p>
</body>
</noframes>
</frameset>
</html>
```
### Screenshot
_No response_
### Logs
```shell
No logs in gradio nor in the browser console.
```
### System Info
```shell
Gradio 3.4, Google Chrome
```
### Severity
annoying
| Thanks @ValentinKoch! @aliabid94 do you know if this is fixed by #2112?
Should add that I am hosting on AWS EC2 :) maybe that changes things
Could be related to #2266, probably a CORS issue. | 2023-01-24T14:41:05 |
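For context, a minimal sketch of the kind of app affected here: any app launched with the `auth` parameter and then embedded in a frame. The credentials, function, and host/port values are placeholders.
```python
import gradio as gr

# Placeholder app with authentication enabled, exposed so it can be framed
# from another page.
demo = gr.Interface(lambda name: "Hello " + name, gr.Textbox(), gr.Textbox())
demo.launch(auth=("user", "password"), server_name="0.0.0.0", server_port=7860)
```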
gradio-app/gradio | 3,089 | gradio-app__gradio-3089 | [
"1883"
] | 431a987d612d5e43097898d13523fb72bdf84214 | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -1232,6 +1232,7 @@ def queue(
blocks_dependencies=self.dependencies,
)
self.config = self.get_config_file()
+ self.app = routes.App.create_app(self)
return self
def launch(
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -481,6 +481,7 @@ async def join_queue(
async def get_queue_status():
return app.get_blocks()._queue.get_estimation()
+ @app.on_event("startup")
@app.get("/startup-events")
async def startup_events():
if not app.startup_events_triggered:
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -16,12 +16,14 @@
import mlflow
import pytest
+import uvicorn
import wandb
import websockets
from fastapi.testclient import TestClient
import gradio as gr
from gradio.exceptions import DuplicateBlockError
+from gradio.networking import Server, get_first_available_port
from gradio.test_data.blocks_configs import XRAY_CONFIG
from gradio.utils import assert_configs_are_equivalent_besides_ids
@@ -323,6 +325,39 @@ async def test_restart_after_close(self):
completed = True
assert msg["output"]["data"][0] == "Victor"
+ @pytest.mark.asyncio
+ async def test_run_without_launching(self):
+ """Test that we can start the app and use queue without calling .launch().
+
+ This is essentially what the 'gradio' reload mode does
+ """
+
+ port = get_first_available_port(7860, 7870)
+
+ io = gr.Interface(lambda s: s, gr.Textbox(), gr.Textbox()).queue()
+
+ config = uvicorn.Config(app=io.app, port=port, log_level="warning")
+
+ server = Server(config=config)
+ server.run_in_thread()
+
+ try:
+ async with websockets.connect(f"ws://localhost:{port}/queue/join") as ws:
+ completed = False
+ while not completed:
+ msg = json.loads(await ws.recv())
+ if msg["msg"] == "send_data":
+ await ws.send(json.dumps({"data": ["Victor"], "fn_index": 0}))
+ if msg["msg"] == "send_hash":
+ await ws.send(
+ json.dumps({"fn_index": 0, "session_hash": "shdce"})
+ )
+ if msg["msg"] == "process_completed":
+ completed = True
+ assert msg["output"]["data"][0] == "Victor"
+ finally:
+ server.close()
+
class TestComponentsInBlocks:
def test_slider_random_value_config(self):
| Warn enable_queue in reload mode
**Is your feature request related to a problem? Please describe.**
Warn the user when enable_queue is true in reload mode (`gradio app.py`)
**Additional context**
Since we launch the app with uvicorn in reload mode, only the FastAPI app is run. We could add workarounds like triggering an endpoint to start queueing, but I don't think that is necessary.
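A rough sketch of what reload mode amounts to, based on the new test `test_run_without_launching`; with the patch above, `.queue()` builds `io.app` and registers a startup event so queueing works without calling `.launch()`:
```python
import gradio as gr
import uvicorn

# Serve the FastAPI app directly, as 'gradio app.py' reload mode does,
# instead of calling .launch().
io = gr.Interface(lambda s: s, gr.Textbox(), gr.Textbox()).queue()
uvicorn.run(io.app, port=7860)
```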
| 2023-01-30T21:36:35 |
|
gradio-app/gradio | 3,091 | gradio-app__gradio-3091 | [
"2287"
] | 5264b4c6ff086844946d5e17e4bbc4554766bcb3 | diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -52,7 +52,12 @@ def to_binary(x: str | Dict) -> bytes:
def decode_base64_to_image(encoding: str) -> Image.Image:
content = encoding.split(";")[1]
image_encoded = content.split(",")[1]
- return Image.open(BytesIO(base64.b64decode(image_encoded)))
+ img = Image.open(BytesIO(base64.b64decode(image_encoded)))
+ exif = img.getexif()
+ # 274 is the code for image rotation and 1 means "correct orientation"
+ if exif.get(274, 1) != 1 and hasattr(ImageOps, "exif_transpose"):
+ img = ImageOps.exif_transpose(img)
+ return img
def encode_url_or_file_to_base64(path: str | Path, encryption_key: bytes | None = None):
| diff --git a/test/test_processing_utils.py b/test/test_processing_utils.py
--- a/test/test_processing_utils.py
+++ b/test/test_processing_utils.py
@@ -73,6 +73,14 @@ def test_encode_pil_to_base64_keeps_pnginfo(self):
assert decoded_image.info == input_img.info
+ @patch("PIL.Image.Image.getexif", return_value={274: 3})
+ @patch("PIL.ImageOps.exif_transpose")
+ def test_base64_to_image_does_rotation(self, mock_rotate, mock_exif):
+ input_img = Image.open("gradio/test_data/test_image.png")
+ base64 = processing_utils.encode_pil_to_base64(input_img)
+ processing_utils.decode_base64_to_image(base64)
+ mock_rotate.assert_called_once()
+
def test_resize_and_crop(self):
img = Image.open("gradio/test_data/test_image.png")
new_img = processing_utils.resize_and_crop(img, (20, 20))
| images rotated
### Describe the bug
When I upload an image on my iPhone it gets rotated compared to uploading the same image on desktop.
I assume this might have something to do with the iPhone image upload being HEIC.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://huggingface.co/spaces/krrishD/suitify_v1
### Screenshot
_No response_
### Logs
```shell
no debug output for this
```
### System Info
```shell
Using gradio on my iphone
```
### Severity
serious, I can't work around it
I've noticed this issue before; it seems to be a general problem with image uploading systems. @dawoodkhan82 would you be able to take a look at this?
I only run into this issue when I select the "Take Photo or Video" option:

This doesn't happen to me when uploading via an image component

But it does happen to me when using UploadButton + Chatbot in https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot

Reproduced here: https://www.loom.com/share/d62c8614873f4294b6169fa637976b97
This is the space I used in the loom (it's public): https://huggingface.co/spaces/lakshman111/imageprocessingdebugging
The space just echoes the user's image back to them, and it comes back horizontal when I used an iPhone and took a vertical image.
@freddyaboulton it looks like the issue isn't happening on upload, but rather happening during pre or post processing | 2023-01-30T23:20:20 |
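For reference, the rotation fix from the patch shown on its own; the file name is hypothetical, and EXIF tag 274 is the orientation tag (1 means "already upright"):
```python
from PIL import Image, ImageOps

img = Image.open("photo_from_iphone.jpg")  # hypothetical file
# Any orientation value other than 1 means the raw pixels must be transposed
# to match how the phone displayed the photo.
if img.getexif().get(274, 1) != 1:
    img = ImageOps.exif_transpose(img)
```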
gradio-app/gradio | 3,117 | gradio-app__gradio-3117 | [
"3119"
] | 63d5efcfc464ff82877cd89b778231c23c3109ad | diff --git a/demo/video_component/run.py b/demo/video_component/run.py
--- a/demo/video_component/run.py
+++ b/demo/video_component/run.py
@@ -1,8 +1,22 @@
-import gradio as gr
+import gradio as gr
+import os
-css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"
-with gr.Blocks(css=css) as demo:
- gr.Video()
+a = os.path.join(os.path.dirname(__file__), "files/world.mp4") # Video
+b = os.path.join(os.path.dirname(__file__), "files/a.mp4") # Video
+c = os.path.join(os.path.dirname(__file__), "files/b.mp4") # Video
-demo.launch()
\ No newline at end of file
+
+demo = gr.Interface(
+ fn=lambda x: x,
+ inputs=gr.Video(type="file"),
+ outputs=gr.Video(),
+ examples=[
+ [a],
+ [b],
+ [c],
+ ],
+)
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/scripts/copy_demos.py b/scripts/copy_demos.py
--- a/scripts/copy_demos.py
+++ b/scripts/copy_demos.py
@@ -34,6 +34,7 @@ def copy_all_demos(source_dir: str, dest_dir: str):
"stt_or_tts",
"stream_audio",
"stream_frames",
+ "video_component",
"zip_files",
]
for demo in demos_to_copy:
| Video ui is jumpy when the value is changed
### Describe the bug
This video shows the issue:
https://user-images.githubusercontent.com/12937446/216451342-492e8d36-1f9d-4d23-a64c-abdaa8aa8a39.mov
I think the video is jumping from video size -> default size -> video size. It would be good if it at least didn't jump down to that in-between size.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://user-images.githubusercontent.com/12937446/216451342-492e8d36-1f9d-4d23-a64c-abdaa8aa8a39.mov
### Screenshot
https://user-images.githubusercontent.com/12937446/216451342-492e8d36-1f9d-4d23-a64c-abdaa8aa8a39.mov
### Logs
```shell
no
```
### System Info
```shell
no
```
### Severity
annoying
| 2023-02-02T19:19:33 |
||
gradio-app/gradio | 3,124 | gradio-app__gradio-3124 | [
"2497"
] | ec2b68f554499f6cc4134fe2da59da6e0868320a | diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -547,7 +547,7 @@ def render_input_column(
# as a proxy of whether the queue will be enabled.
# Using a generator function without the queue will raise an error.
if inspect.isgeneratorfunction(self.fn):
- stop_btn = Button("Stop", variant="stop")
+ stop_btn = Button("Stop", variant="stop", visible=False)
elif self.interface_type == InterfaceTypes.UNIFIED:
clear_btn = Button("Clear")
submit_btn = Button("Submit", variant="primary")
@@ -588,7 +588,7 @@ def render_output_column(
# is created. We use whether a generator function is provided
# as a proxy of whether the queue will be enabled.
# Using a generator function without the queue will raise an error.
- stop_btn = Button("Stop", variant="stop")
+ stop_btn = Button("Stop", variant="stop", visible=False)
if self.allow_flagging == "manual":
flag_btns = self.render_flag_btns()
elif self.allow_flagging == "auto":
@@ -643,10 +643,38 @@ def attach_submit_events(self, submit_btn: Button | None, stop_btn: Button | Non
)
else:
assert submit_btn is not None, "Submit button not rendered"
+ fn = self.fn
+ extra_output = []
+ if stop_btn:
+
+ # Wrap the original function to show/hide the "Stop" button
+ def fn(*args):
+ # The main idea here is to call the original function
+ # and append some updates to keep the "Submit" button
+ # hidden and the "Stop" button visible
+ # The 'finally' block hides the "Stop" button and
+ # shows the "submit" button. Having a 'finally' block
+ # will make sure the UI is "reset" even if there is an exception
+ try:
+ for output in self.fn(*args):
+ if len(self.output_components) == 1 and not self.batch:
+ output = [output]
+ output = [o for o in output]
+ yield output + [
+ Button.update(visible=False),
+ Button.update(visible=True),
+ ]
+ finally:
+ yield [
+ {"__type__": "generic_update"}
+ for _ in self.output_components
+ ] + [Button.update(visible=True), Button.update(visible=False)]
+
+ extra_output = [submit_btn, stop_btn]
pred = submit_btn.click(
- self.fn,
+ fn,
self.input_components,
- self.output_components,
+ self.output_components + extra_output,
api_name="predict",
scroll_to_output=True,
preprocess=not (self.api_mode),
@@ -655,11 +683,24 @@ def attach_submit_events(self, submit_btn: Button | None, stop_btn: Button | Non
max_batch_size=self.max_batch_size,
)
if stop_btn:
+ submit_btn.click(
+ lambda: (
+ submit_btn.update(visible=False),
+ stop_btn.update(visible=True),
+ ),
+ inputs=None,
+ outputs=[submit_btn, stop_btn],
+ queue=False,
+ )
stop_btn.click(
- None,
+ lambda: (
+ submit_btn.update(visible=True),
+ stop_btn.update(visible=False),
+ ),
inputs=None,
- outputs=None,
+ outputs=[submit_btn, stop_btn],
cancels=[pred],
+ queue=False,
)
def attach_clear_events(
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -1039,6 +1039,51 @@ def iteration(a):
cancel.click(None, None, None, cancels=[click])
demo.queue().launch(prevent_thread_lock=True)
+ @pytest.mark.asyncio
+ async def test_cancel_button_for_interfaces(self):
+ def generate(x):
+ for i in range(4):
+ yield i
+ time.sleep(0.2)
+
+ io = gr.Interface(generate, gr.Textbox(), gr.Textbox()).queue()
+ stop_btn_id = next(
+ i for i, k in io.blocks.items() if getattr(k, "value", None) == "Stop"
+ )
+ assert not io.blocks[stop_btn_id].visible
+
+ io.launch(prevent_thread_lock=True)
+
+ async with websockets.connect(
+ f"{io.local_url.replace('http', 'ws')}queue/join"
+ ) as ws:
+ completed = False
+ checked_iteration = False
+ while not completed:
+ msg = json.loads(await ws.recv())
+ if msg["msg"] == "send_data":
+ await ws.send(json.dumps({"data": ["freddy"], "fn_index": 0}))
+ if msg["msg"] == "send_hash":
+ await ws.send(json.dumps({"fn_index": 0, "session_hash": "shdce"}))
+ if msg["msg"] == "process_generating" and isinstance(
+ msg["output"]["data"][0], str
+ ):
+ checked_iteration = True
+ assert msg["output"]["data"][1:] == [
+ {"visible": False, "__type__": "update"},
+ {"visible": True, "__type__": "update"},
+ ]
+ if msg["msg"] == "process_completed":
+ assert msg["output"]["data"] == [
+ {"__type__": "update"},
+ {"visible": True, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ ]
+ completed = True
+ assert checked_iteration
+
+ io.close()
+
class TestEvery:
def test_raise_exception_if_parameters_invalid(self):
diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -277,7 +277,7 @@ def generator(string):
headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
- assert output["data"] == ["a"]
+ assert output["data"][0] == "a"
response = client.post(
"/api/predict/",
@@ -285,7 +285,7 @@ def generator(string):
headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
- assert output["data"] == ["b"]
+ assert output["data"][0] == "b"
response = client.post(
"/api/predict/",
@@ -293,7 +293,7 @@ def generator(string):
headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
- assert output["data"] == ["c"]
+ assert output["data"][0] == "c"
response = client.post(
"/api/predict/",
@@ -301,7 +301,11 @@ def generator(string):
headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
- assert output["data"] == [None]
+ assert output["data"] == [
+ {"__type__": "update"},
+ {"__type__": "update", "visible": True},
+ {"__type__": "update", "visible": False},
+ ]
response = client.post(
"/api/predict/",
@@ -309,7 +313,15 @@ def generator(string):
headers={"Authorization": f"Bearer {app.queue_token}"},
)
output = dict(response.json())
- assert output["data"] == ["a"]
+ assert output["data"][0] is None
+
+ response = client.post(
+ "/api/predict/",
+ json={"data": ["abc"], "fn_index": 0, "session_hash": "11"},
+ headers={"Authorization": f"Bearer {app.queue_token}"},
+ )
+ output = dict(response.json())
+ assert output["data"][0] == "a"
class TestApp:
| Modify interfaces with generators so that submit button turns into stop button
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As of gradio 3.6, interfaces that use generators will automatically have a Stop button added. It would be nicer if the submit button turned into a stop button when you clicked it and the stop button automatically turned back into a submit button when the prediction finished or when it was clicked.
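A generator-backed Interface like the one in the new test, shown for illustration; with this patch, clicking Submit hides it and shows a Stop button until the generator finishes or the run is cancelled:
```python
import time
import gradio as gr

def generate(x):
    # Yield partial results so the Stop button stays visible between steps.
    for i in range(4):
        yield str(i)
        time.sleep(0.2)

demo = gr.Interface(generate, gr.Textbox(), gr.Textbox()).queue()
demo.launch()
```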
| 2023-02-03T16:52:42 |
|
gradio-app/gradio | 3,126 | gradio-app__gradio-3126 | [
"3035"
] | 84afc51484a54cb1250186f74ce4b2eeaaa79da3 | diff --git a/gradio/ipython_ext.py b/gradio/ipython_ext.py
--- a/gradio/ipython_ext.py
+++ b/gradio/ipython_ext.py
@@ -3,6 +3,8 @@
except ImportError:
pass
+import warnings
+
import gradio
@@ -12,6 +14,8 @@ def load_ipython_extension(ipython):
@register_cell_magic
@needs_local_scope
def blocks(line, cell, local_ns=None):
+ if "gr.Interface" in cell:
+ warnings.warn("Usage of gr.Interface with %%blocks may result in errors.")
with __demo.clear():
exec(cell, None, local_ns)
__demo.launch(quiet=True)
| %%blocks cell magic doesn't work
### Describe the bug
Unable to use %%blocks in Colab. The following code snippet is just a random one copied from a Gradio example.
```py
%%blocks
def generate(text):
"hi"
examples = [
["The Moon's orbit around Earth has"],
["The smooth Borealis basin in the Northern Hemisphere covers 40%"],
]
demo = gr.Interface(
fn=generate,
inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
outputs=gr.outputs.Textbox(label="Generated Text"),
examples=examples
)
```
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://colab.research.google.com/drive/1ziLOFs-nSyUgUeo4WBkf1TIfrLPxkgDu?usp=sharing
### Screenshot
<img width="939" alt="image" src="https://user-images.githubusercontent.com/38108242/213739638-7853b185-f769-4c3d-8005-c65902fb5185.png">
### Logs
```shell
---------------------------------------------------------------------------
DuplicateBlockError Traceback (most recent call last)
<ipython-input-18-0e7a87c35f47> in <module>
----> 1 get_ipython().run_cell_magic('blocks', '', '\n\n\ndef generate(text):\n "hi"\n\nexamples = [\n ["The Moon\'s orbit around Earth has"],\n ["The smooth Borealis basin in the Northern Hemisphere covers 40%"],\n]\n\ndemo = gr.Interface(\n fn=generate,\n inputs=gr.inputs.Textbox(lines=5, label="Input Text"),\n outputs=gr.outputs.Textbox(label="Generated Text"),\n examples=examples\n)\n')
5 frames
/usr/local/lib/python3.8/dist-packages/gradio/blocks.py in render(self)
85 """
86 if Context.root_block is not None and self._id in Context.root_block.blocks:
---> 87 raise DuplicateBlockError(
88 f"A block with id: {self._id} has already been rendered in the current Blocks."
89 )
DuplicateBlockError: A block with id: 62 has already been rendered in the current Blocks.
```
### System Info
```shell
Colab, no GPU
```
### Severity
serious, but I can work around it
| Can confirm the issue, will look into it!
I think this only fails if you're running an interface.
<img width="1452" alt="image" src="https://user-images.githubusercontent.com/41651716/214561126-d1248e3b-7b74-4ff1-a0d9-10f180bba2a5.png">
Hmm this is kinda tricky since the cell magic defines a global blocks context that conflicts with the one created with the gr.Interface. | 2023-02-03T22:03:38 |
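A sketch of a cell body the magic handles without this conflict, since it only adds components and events to the implicit Blocks context rather than building a separate `gr.Interface` (the function and labels are placeholders):
```python
# (cell begins with the %%blocks magic line)
import gradio as gr

text = gr.Textbox(lines=5, label="Input Text")
out = gr.Textbox(label="Generated Text")
btn = gr.Button("Generate")
btn.click(lambda t: "hi", text, out)
```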
|
gradio-app/gradio | 3,196 | gradio-app__gradio-3196 | [
"3187"
] | fa094a03e231da55a2b1504780f707cba436178c | diff --git a/gradio/queueing.py b/gradio/queueing.py
--- a/gradio/queueing.py
+++ b/gradio/queueing.py
@@ -4,6 +4,7 @@
import copy
import sys
import time
+from asyncio import TimeoutError as AsyncTimeOutError
from collections import deque
from typing import Any, Deque, Dict, List, Tuple
@@ -205,7 +206,7 @@ async def broadcast_live_estimations(self) -> None:
if self.live_updates:
await self.broadcast_estimations()
- async def gather_event_data(self, event: Event) -> bool:
+ async def gather_event_data(self, event: Event, receive_timeout=60) -> bool:
"""
Gather data for the event
@@ -216,7 +217,20 @@ async def gather_event_data(self, event: Event) -> bool:
client_awake = await self.send_message(event, {"msg": "send_data"})
if not client_awake:
return False
- event.data = await self.get_message(event)
+ data, client_awake = await self.get_message(event, timeout=receive_timeout)
+ if not client_awake:
+ # In the event, we timeout due to large data size
+ # Let the client know, otherwise will hang
+ await self.send_message(
+ event,
+ {
+ "msg": "process_completed",
+ "output": {"error": "Time out uploading data to server"},
+ "success": False,
+ },
+ )
+ return False
+ event.data = data
return True
async def notify_clients(self) -> None:
@@ -424,21 +438,25 @@ async def process_events(self, events: List[Event], batch: bool) -> None:
# to start "from scratch"
await self.reset_iterators(event.session_hash, event.fn_index)
- async def send_message(self, event, data: Dict) -> bool:
+ async def send_message(self, event, data: Dict, timeout: float | int = 1) -> bool:
try:
- await event.websocket.send_json(data=data)
+ await asyncio.wait_for(
+ event.websocket.send_json(data=data), timeout=timeout
+ )
return True
except:
await self.clean_event(event)
return False
- async def get_message(self, event) -> PredictBody | None:
+ async def get_message(self, event, timeout=5) -> Tuple[PredictBody | None, bool]:
try:
- data = await event.websocket.receive_json()
- return PredictBody(**data)
- except:
+ data = await asyncio.wait_for(
+ event.websocket.receive_json(), timeout=timeout
+ )
+ return PredictBody(**data), True
+ except AsyncTimeOutError:
await self.clean_event(event)
- return None
+ return None, False
async def reset_iterators(self, session_hash: str, fn_index: int):
await AsyncRequest(
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -12,6 +12,7 @@
import secrets
import tempfile
import traceback
+from asyncio import TimeoutError as AsyncTimeOutError
from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict, List, Optional, Type
@@ -479,8 +480,20 @@ async def join_queue(
await websocket.accept()
# In order to cancel jobs, we need the session_hash and fn_index
# to create a unique id for each job
- await websocket.send_json({"msg": "send_hash"})
- session_info = await websocket.receive_json()
+ try:
+ await asyncio.wait_for(
+ websocket.send_json({"msg": "send_hash"}), timeout=1
+ )
+ except AsyncTimeOutError:
+ return
+
+ try:
+ session_info = await asyncio.wait_for(
+ websocket.receive_json(), timeout=1
+ )
+ except AsyncTimeOutError:
+ return
+
event = Event(
websocket, session_info["session_hash"], session_info["fn_index"]
)
| diff --git a/test/test_queueing.py b/test/test_queueing.py
--- a/test/test_queueing.py
+++ b/test/test_queueing.py
@@ -1,3 +1,4 @@
+import asyncio
import os
import sys
from collections import deque
@@ -31,7 +32,7 @@ def queue() -> Queue:
@pytest.fixture()
def mock_event() -> Event:
- websocket = MagicMock()
+ websocket = AsyncMock()
event = Event(websocket=websocket, session_hash="test", fn_index=0)
yield event
@@ -53,9 +54,20 @@ async def test_stop_resume(self, queue: Queue):
@pytest.mark.asyncio
async def test_receive(self, queue: Queue, mock_event: Event):
+ mock_event.websocket.receive_json.return_value = {"data": ["test"], "fn": 0}
await queue.get_message(mock_event)
assert mock_event.websocket.receive_json.called
+ @pytest.mark.asyncio
+ async def test_receive_timeout(self, queue: Queue, mock_event: Event):
+ async def take_too_long():
+ await asyncio.sleep(1)
+
+ mock_event.websocket.receive_json = take_too_long
+ data, is_awake = await queue.get_message(mock_event, timeout=0.5)
+ assert data is None
+ assert not is_awake
+
@pytest.mark.asyncio
async def test_send(self, queue: Queue, mock_event: Event):
await queue.send_message(mock_event, {})
@@ -85,7 +97,7 @@ async def test_gather_event_data(self, queue: Queue, mock_event: Event):
queue.send_message = AsyncMock()
queue.get_message = AsyncMock()
queue.send_message.return_value = True
- queue.get_message.return_value = {"data": ["test"], "fn": 0}
+ queue.get_message.return_value = {"data": ["test"], "fn": 0}, True
assert await queue.gather_event_data(mock_event)
assert queue.send_message.called
@@ -95,6 +107,25 @@ async def test_gather_event_data(self, queue: Queue, mock_event: Event):
assert await queue.gather_event_data(mock_event)
assert not (queue.send_message.called)
+ @pytest.mark.asyncio
+ async def test_gather_event_data_timeout(self, queue: Queue, mock_event: Event):
+ async def take_too_long():
+ await asyncio.sleep(1)
+
+ queue.send_message = AsyncMock()
+ queue.send_message.return_value = True
+
+ mock_event.websocket.receive_json = take_too_long
+ is_awake = await queue.gather_event_data(mock_event, receive_timeout=0.5)
+ assert not is_awake
+
+ # Have to use awful [1][0][1] syntax cause of python 3.7
+ assert queue.send_message.call_args_list[1][0][1] == {
+ "msg": "process_completed",
+ "output": {"error": "Time out uploading data to server"},
+ "success": False,
+ }
+
class TestQueueEstimation:
def test_get_update_estimation(self, queue: Queue):
@@ -193,6 +224,8 @@ async def test_process_event_handles_error_sending_process_start_msg(
self, queue: Queue, mock_event: Event
):
mock_event.websocket.send_json = AsyncMock()
+ mock_event.websocket.receive_json.return_value = {"data": ["test"], "fn": 0}
+
mock_event.websocket.send_json.side_effect = ["2", ValueError("Can't connect")]
queue.call_prediction = AsyncMock()
mock_event.disconnect = AsyncMock()
@@ -260,6 +293,7 @@ async def test_process_event_handles_exception_in_is_generating_request(
async def test_process_event_handles_error_sending_process_completed_msg(
self, queue: Queue, mock_event: Event
):
+ mock_event.websocket.receive_json.return_value = {"data": ["test"], "fn": 0}
mock_event.websocket.send_json = AsyncMock()
mock_event.websocket.send_json.side_effect = [
"2",
@@ -289,6 +323,7 @@ async def test_process_event_handles_error_sending_process_completed_msg(
async def test_process_event_handles_exception_during_disconnect(
self, mock_request, queue: Queue, mock_event: Event
):
+ mock_event.websocket.receive_json.return_value = {"data": ["test"], "fn": 0}
mock_event.websocket.send_json = AsyncMock()
queue.call_prediction = AsyncMock(
return_value=MagicMock(has_exception=False, json=dict(is_generating=False))
| load events don't work well on HF spaces if queue enabled
### Describe the bug
I've noticed that some demos don't work well on HF spaces. If you go to them, you'll see that the queue is really large and doesn't go down.
The one thing these demos all have in common is that they have load events and the queue enabled.
* [blocks_random_slider](https://huggingface.co/spaces/gradio/blocks_random_slider)

* https://huggingface.co/spaces/gradio/blocks_random_slider_main

* https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability

* https://huggingface.co/spaces/gradio/native_plots

* https://huggingface.co/spaces/gradio/altair_plot

Running these demos locally works as expected.
The reason I think this is related to the queue is that this demo has load events with the queue disabled and it works well:
https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet

### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
-
### Screenshot
-
### Logs
```shell
-
```
### System Info
```shell
-
```
### Severity
serious, but I can work around it
| The weird thing is that I duplicated one of these spaces and it's been working fine all day?
https://huggingface.co/spaces/freddyaboulton/blocks_random_slider | 2023-02-14T22:11:04 |
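For reference, the core pattern the patch applies to the queue's websocket I/O, shown standalone; `websocket` is assumed to be the Starlette WebSocket used in `queueing.py`:
```python
import asyncio

async def get_message_with_timeout(websocket, timeout=5):
    # Wrap the receive in wait_for so one stalled client (e.g. a slow upload
    # of large data) cannot block the worker that drains the queue.
    try:
        data = await asyncio.wait_for(websocket.receive_json(), timeout=timeout)
        return data, True
    except asyncio.TimeoutError:
        return None, False
```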
gradio-app/gradio | 3,212 | gradio-app__gradio-3212 | [
"1942"
] | a2b80ca5c6a73de6967fbba73c2e454dc35eb332 | diff --git a/demo/bokeh_plot/run.py b/demo/bokeh_plot/run.py
new file mode 100644
--- /dev/null
+++ b/demo/bokeh_plot/run.py
@@ -0,0 +1,94 @@
+import gradio as gr
+import xyzservices.providers as xyz
+from bokeh.plotting import figure
+from bokeh.tile_providers import get_provider
+from bokeh.models import ColumnDataSource, Whisker
+from bokeh.plotting import figure
+from bokeh.sampledata.autompg2 import autompg2 as df
+from bokeh.sampledata.penguins import data
+from bokeh.transform import factor_cmap, jitter, factor_mark
+
+
+def get_plot(plot_type):
+ if plot_type == "map":
+ tile_provider = get_provider(xyz.OpenStreetMap.Mapnik)
+ plot = figure(
+ x_range=(-2000000, 6000000),
+ y_range=(-1000000, 7000000),
+ x_axis_type="mercator",
+ y_axis_type="mercator",
+ )
+ plot.add_tile(tile_provider)
+ return plot
+ elif plot_type == "whisker":
+ classes = list(sorted(df["class"].unique()))
+
+ p = figure(
+ height=400,
+ x_range=classes,
+ background_fill_color="#efefef",
+ title="Car class vs HWY mpg with quintile ranges",
+ )
+ p.xgrid.grid_line_color = None
+
+ g = df.groupby("class")
+ upper = g.hwy.quantile(0.80)
+ lower = g.hwy.quantile(0.20)
+ source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))
+
+ error = Whisker(
+ base="base",
+ upper="upper",
+ lower="lower",
+ source=source,
+ level="annotation",
+ line_width=2,
+ )
+ error.upper_head.size = 20
+ error.lower_head.size = 20
+ p.add_layout(error)
+
+ p.circle(
+ jitter("class", 0.3, range=p.x_range),
+ "hwy",
+ source=df,
+ alpha=0.5,
+ size=13,
+ line_color="white",
+ color=factor_cmap("class", "Light6", classes),
+ )
+ return p
+ elif plot_type == "scatter":
+
+ SPECIES = sorted(data.species.unique())
+ MARKERS = ["hex", "circle_x", "triangle"]
+
+ p = figure(title="Penguin size", background_fill_color="#fafafa")
+ p.xaxis.axis_label = "Flipper Length (mm)"
+ p.yaxis.axis_label = "Body Mass (g)"
+
+ p.scatter(
+ "flipper_length_mm",
+ "body_mass_g",
+ source=data,
+ legend_group="species",
+ fill_alpha=0.4,
+ size=12,
+ marker=factor_mark("species", MARKERS, SPECIES),
+ color=factor_cmap("species", "Category10_3", SPECIES),
+ )
+
+ p.legend.location = "top_left"
+ p.legend.title = "Species"
+ return p
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ plot_type = gr.Radio(value="scatter", choices=["scatter", "whisker", "map"])
+ plot = gr.Plot()
+ plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])
+ demo.load(get_plot, inputs=[plot_type], outputs=[plot])
+
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -4132,7 +4132,17 @@ def __init__(
)
def get_config(self):
- return {"value": self.value, **IOComponent.get_config(self)}
+ try:
+ import bokeh # type: ignore
+
+ bokeh_version = bokeh.__version__
+ except ImportError:
+ bokeh_version = None
+ return {
+ "value": self.value,
+ "bokeh_version": bokeh_version,
+ **IOComponent.get_config(self),
+ }
@staticmethod
def update(
@@ -4162,9 +4172,11 @@ def postprocess(self, y) -> Dict[str, str] | None:
if isinstance(y, (ModuleType, matplotlib.figure.Figure)):
dtype = "matplotlib"
out_y = processing_utils.encode_plot_to_base64(y)
- elif isinstance(y, dict):
+ elif "bokeh" in y.__module__:
dtype = "bokeh"
- out_y = json.dumps(y)
+ from bokeh.embed import json_item # type: ignore
+
+ out_y = json.dumps(json_item(y))
else:
is_altair = "altair" in y.__module__
if is_altair:
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -2017,7 +2017,9 @@ def test_dataset_calls_as_example(*mocks):
class TestScatterPlot:
+ @patch.dict("sys.modules", {"bokeh": MagicMock(__version__="3.0.3")})
def test_get_config(self):
+
assert gr.ScatterPlot().get_config() == {
"caption": None,
"elem_id": None,
@@ -2029,6 +2031,7 @@ def test_get_config(self):
"style": {},
"value": None,
"visible": True,
+ "bokeh_version": "3.0.3",
}
def test_no_color(self):
@@ -2199,6 +2202,7 @@ def test_scatterplot_accepts_fn_as_value(self):
class TestLinePlot:
+ @patch.dict("sys.modules", {"bokeh": MagicMock(__version__="3.0.3")})
def test_get_config(self):
assert gr.LinePlot().get_config() == {
"caption": None,
@@ -2211,6 +2215,7 @@ def test_get_config(self):
"style": {},
"value": None,
"visible": True,
+ "bokeh_version": "3.0.3",
}
def test_no_color(self):
@@ -2360,6 +2365,7 @@ def test_lineplot_accepts_fn_as_value(self):
class TestBarPlot:
+ @patch.dict("sys.modules", {"bokeh": MagicMock(__version__="3.0.3")})
def test_get_config(self):
assert gr.BarPlot().get_config() == {
"caption": None,
@@ -2372,6 +2378,7 @@ def test_get_config(self):
"style": {},
"value": None,
"visible": True,
+ "bokeh_version": "3.0.3",
}
def test_no_color(self):
| Bokeh plots do not appear
### Describe the bug
When using the `Plot` component, bokeh plots do not appear at all.
This is documented here: #1632
> Pictures cannot be generated in Bokeh mode.
And in this PR: #1609
> The Bokeh plots are currently broken. The reason for this is that bokehJS internally uses getElementById to get the container of the plot and render it. Since Gradio UI is using the shadow DOM to render, this step fails.
> I have tried here a workaround where I added a new hidden div in the index.html file to use as a helper to render the plot, once it is rendered, then the content is appended to the actual div that should have the plot. This part is all working, unfortunately although I can see the div with the expected content, the plot is still not showing.
The PR was closed because it was an attempt to fix the problem using an older version of bokeh. We still need to find a solution using the latest version of bokeh.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
https://github.com/gradio-app/gradio/blob/main/demo/outbreak_forecast/run.py
### Screenshot
_No response_
### Logs
```shell
N/A
```
### System Info
```shell
3.1.3
```
### Severity
serious, but I can work around it
| cc @dawoodkhan82 @Ian-GL
Is this Bug resolved in the latest release 3.4.0?
> Is this Bug resolved in the latest release 3.4.0?
Unfortunately not yet. It is on our radar though. Will update this issue when we do resolve this.
It looks like bokeh 3.0.3 is out, so it might be good to revisit this @dawoodkhan82
Will revisit!
I was trying to use bokeh maps. Got an error `TypeError: Model.to_json() missing 1 required positional argument: 'include_defaults'`
```python
import gradio as gr
import xyzservices.providers as xyz
from bokeh.plotting import figure
from bokeh.tile_providers import get_provider
def create_map(text):
tile_provider = get_provider(xyz.OpenStreetMap.Mapnik)
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(tile_provider)
return p
demo = gr.Interface(
fn=create_map,
inputs='text',
outputs=gr.Plot().style(),
)
demo.launch()
```
Will take a look @giswqs ! My guess is that the gradio library is using an outdated bokeh api that's not compatible with the bokeh version in your demo.
Mind sharing your bokeh version? As well as xyzservices? BTW what is that library?
See the example at https://docs.bokeh.org/en/latest/docs/examples/topics/geo/tile_xyzservices.html
[xyzservices](https://github.com/geopandas/xyzservices) is a lightweight library providing a repository of available XYZ services offering raster basemap tiles.
- gradio: 3.18.0
- bokeh: 2.4.3
- xyzservices: 2022.9.0
Thanks @giswqs ! So the issue about `to_json() missing 1 required positional argument` should be easy to fix but the problem about the UI not displaying the plots persists. What's more, looks like there's a big difference between bokeh 2.0 and bokeh 3.0. Might be hard to support both? If we had to choose one, would 3.0 be better?
Yes, prefer bokeh 3.x. | 2023-02-16T16:50:48 |
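For reference, a sketch of the serialization path the patch switches `Plot.postprocess` to for bokeh figures; the figure itself is a throwaway example:
```python
import json
from bokeh.embed import json_item
from bokeh.plotting import figure

# A bokeh figure is converted with bokeh.embed.json_item, and the component
# config now reports the installed bokeh version so the front-end can load a
# matching BokehJS build.
p = figure(title="example")
p.scatter([1, 2, 3], [4, 5, 6])
payload = json.dumps(json_item(p))
```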
gradio-app/gradio | 3,223 | gradio-app__gradio-3223 | [
"3171"
] | 3530a86433ef7a27053dd6bd97a2d62947506d24 | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -148,7 +148,9 @@ def create_app(blocks: gradio.Blocks) -> App:
@app.get("/user")
@app.get("/user/")
def get_current_user(request: fastapi.Request) -> Optional[str]:
- token = request.cookies.get("access-token")
+ token = request.cookies.get("access-token") or request.cookies.get(
+ "access-token-unsecure"
+ )
return app.tokens.get(token)
@app.get("/login_check")
@@ -196,6 +198,9 @@ def login(form_data: OAuth2PasswordRequestForm = Depends()):
samesite="none",
secure=True,
)
+ response.set_cookie(
+ key="access-token-unsecure", value=token, httponly=True
+ )
return response
else:
raise HTTPException(status_code=400, detail="Incorrect credentials.")
| Cannot login to application when deployed on-premise
### Describe the bug
I cannot log in to my gradio app when it's deployed in a docker container on any server. I've tried on 2 different machines with different docker versions. After passing my credentials I'm not redirected to the main page; the login page is just refreshed. I've encountered this problem only on versions 3.17.x and 3.18.0, and only when deployed on a server. Everything works perfectly fine on my local machine, both when run directly and via docker.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
1. Dockerize gradio app with gradio authentication enabled and deploy on some server while exposing it
2. Open the app in your browser
3. Log in
4. Encounter page refresh with no redirection to actual app
### Screenshot
_No response_
### Logs
```shell
I have no logs for this. There's no error or logs neither on python side nor browser.
```
### System Info
```shell
Gradio version: 3.17.x, 3.18.0 (it worked previously on 3.16.2)
```
### Severity
blocking upgrade to latest gradio version
| Hi @Norbiox !
Do you have third party cookies enabled in your browser?
If you open the developer tools, there should be a request to the `login` route, would be helpful to see what that looks like to debug!
Hi,
Yes, I have enabled third party cookies.
I can't find this request. I'm attaching screenshots from before login and immediately after.
Before:

After:

I had the same issue and I was getting this warning in the firefox console:
`Cookie “access-token” has been rejected because a non-HTTPS cookie can’t be set as “secure”.`
Downgrading gradio to 3.16.2 allows login but obviously it is not a very good solution.
If I check the cookie set by gradio with version 3.16.2 it is indeed *not* set as "secure"
The change to make the cookie secure and "sameSite" was added in [this commit ](https://github.com/gradio-app/gradio/commit/9ccfef05421d1733400e3e73d83c756213f2ef29)
Though the commit message does mention it or explain why, as far as I can tell
Thanks for getting back to me @Norbiox and @nerochiaro !
I'm having trouble reproducing this issue.
I created this simple [app ](https://github.com/freddyaboulton/graido-auth) with auth (ignore the typo in the repo name 🙈). It is packaged with Docker and works well for me locally and when I deployed it to this url: https://gradio-auth.onrender.com/ . Credentials `admin/admin`
I tried in Chrome with and without incognito mode and also microsoft edge. I was able to log in correctly. There's a gif of that here:

My guess is that for some reason, your browser is rejecting the gradio cookie set during a successful login?
| 2023-02-17T21:32:20 |
|
gradio-app/gradio | 3,233 | gradio-app__gradio-3233 | [
"3229"
] | 56245276e701f7e4f81228af6e523d4c305af4ed | diff --git a/demo/calculator/run.py b/demo/calculator/run.py
--- a/demo/calculator/run.py
+++ b/demo/calculator/run.py
@@ -27,7 +27,7 @@ def calculator(num1, operation, num2):
[0, "subtract", 1.2],
],
title="Toy Calculator",
- description="Here's a sample toy calculator. Enjoy!",
+ description="Here's a sample toy calculator. Allows you to calculate things like $2+2=4$",
)
if __name__ == "__main__":
demo.launch()
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -307,7 +307,7 @@ def clean_html(raw_html):
"html": True,
},
)
- .use(dollarmath_plugin)
+ .use(dollarmath_plugin, renderer=utils.tex2svg, allow_digits=False)
.use(footnote_plugin)
.enable("table")
)
| Markdown: LaTeX font is black and unreadable in the dark theme
### Describe the bug
The LaTeX font is dark instead of light in the dark theme, rendering it unreadable.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
with gr.Blocks() as interface:
gr.Markdown(value="$ x = 1+1$")
interface.launch()
```
Run the script above.
* Under `http://127.0.0.1:7860/`, the equation is readable.
* Under `http://127.0.0.1:7860/?__theme=dark`, the equation is present but unreadable.
### Screenshot
_No response_
### Logs
```shell
-
```
### System Info
```shell
gradio==3.18.0
```
### Severity
annoying
| 2023-02-18T12:02:21 |
||
gradio-app/gradio | 3,277 | gradio-app__gradio-3277 | [
"3166",
"3248",
"3166"
] | 9c811ed8928c461610fa47e9b0fc450463225881 | diff --git a/demo/blocks_flipper/run.py b/demo/blocks_flipper/run.py
--- a/demo/blocks_flipper/run.py
+++ b/demo/blocks_flipper/run.py
@@ -1,12 +1,15 @@
import numpy as np
import gradio as gr
+
def flip_text(x):
return x[::-1]
+
def flip_image(x):
return np.fliplr(x)
+
with gr.Blocks() as demo:
gr.Markdown("Flip text or image files using this demo.")
with gr.Tab("Flip Text"):
@@ -24,6 +27,6 @@ def flip_image(x):
text_button.click(flip_text, inputs=text_input, outputs=text_output)
image_button.click(flip_image, inputs=image_input, outputs=image_output)
-
+
if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
+ demo.launch()
diff --git a/demo/blocks_mask/run.py b/demo/blocks_mask/run.py
--- a/demo/blocks_mask/run.py
+++ b/demo/blocks_mask/run.py
@@ -1,5 +1,6 @@
import gradio as gr
from gradio.components import Markdown as md
+from PIL import Image
demo = gr.Blocks()
@@ -8,6 +9,9 @@
io2a = gr.Interface(lambda x: x, gr.Image(source="canvas"), gr.Image())
io2b = gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())
+io2c = gr.Interface(
+ lambda x: x, gr.Image(source="canvas", shape=(512, 512)), gr.Image()
+)
io3a = gr.Interface(
lambda x: [x["mask"], x["image"]],
@@ -53,6 +57,20 @@
)
+def save_image(image):
+ image.save("colorede.png")
+ return image
+
+
+img = Image.new("RGB", (512, 512), (150, 150, 150))
+img.save("image.png", "PNG")
+
+io5d = gr.Interface(
+ save_image,
+ gr.Image("image.png", source="upload", tool="color-sketch", type="pil"),
+ gr.Image(),
+)
+
with demo:
md("# Different Ways to Use the Image Input Component")
md(
@@ -71,6 +89,8 @@
"**2b. Black and White Sketchpad: `gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())`**"
)
io2b.render()
+ md("**2c. Black and White Sketchpad with `shape=(512,512)`**")
+ io2c.render()
md("**3a. Binary Mask with image upload:**")
md(
"""```python
@@ -130,7 +150,8 @@
io3b2.render()
with gr.Tab("Two"):
io3b3.render()
-
+ md("**5d. Color Sketchpad with image upload and a default images**")
+ io5d.render()
if __name__ == "__main__":
demo.launch()
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1379,6 +1379,7 @@ def __init__(
streaming: bool = False,
elem_id: str | None = None,
mirror_webcam: bool = True,
+ brush_radius: int | None = None,
**kwargs,
):
"""
@@ -1398,7 +1399,9 @@ def __init__(
streaming: If True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'webcam'.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
mirror_webcam: If True webcam will be mirrored. Default is True.
+ brush_radius: Size of the brush for Sketch. Default is None which chooses a sensible default
"""
+ self.brush_radius = brush_radius
self.mirror_webcam = mirror_webcam
valid_types = ["numpy", "pil", "filepath"]
if type not in valid_types:
@@ -1446,6 +1449,7 @@ def get_config(self):
"value": self.value,
"streaming": self.streaming,
"mirror_webcam": self.mirror_webcam,
+ "brush_radius": self.brush_radius,
**IOComponent.get_config(self),
}
@@ -1456,6 +1460,7 @@ def update(
show_label: bool | None = None,
interactive: bool | None = None,
visible: bool | None = None,
+ brush_radius: int | None = None,
):
updated_config = {
"label": label,
@@ -1463,6 +1468,7 @@ def update(
"interactive": interactive,
"visible": visible,
"value": value,
+ "brush_radius": brush_radius,
"__type__": "update",
}
return IOComponent.add_interactive_to_config(updated_config, interactive)
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -625,6 +625,7 @@ def test_component_functions(self):
source="upload", tool="editor", type="pil", label="Upload Your Image"
)
assert image_input.get_config() == {
+ "brush_radius": None,
"image_mode": "RGB",
"shape": None,
"source": "upload",
| Canvas drawing is cut off when the shape is explicitly set to be a square
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
Right now if we want to make a 512\*512 drawing canvas, the only method is to (1) ask users to create a pure white 512\*512 image using third-party software like Photoshop, then (2) ask users to import that blank image into gr.Image(source='upload', tool='sketch'), then (3) use the resulting mask as the user drawing for any application. (Besides, the initial scribble width cannot be set from code.)
This is over-complicated. We should have a one-line function to make a simple drawing canvas.
**Describe the solution you'd like**
We may consider something like
gr.Image(source='blank', tool='sketch', size=(512, 512))
UI crashes in `gr.Image` with `tool="color-sketch"` and a pre-loaded image
### Describe the bug
Running the following code
```py
#Create a black image
img = Image.new("RGB", (512, 512), (0, 0, 0))
img.save("image.png", "PNG")
#Pre-load the black image in the component
gr.Image(value="image.png", interactive=True, tool="color-sketch")
```
Crashes the UI
<img width="635" alt="image" src="https://user-images.githubusercontent.com/788417/220176049-977ac494-1587-41a8-aafb-b2a961131925.png">
### Reproduction
Here's a colab with an example: https://colab.research.google.com/drive/1Gp5pp51P14B0tM331dS7476-Oqe-Bi04
### System Info
```shell
3.19.0
```
### Severity
serious, but I can work around it
| Hi @lllyasviel this is already possible by doing:
```py
gr.Image(source="canvas", shape=(512, 512))
```
For example, you can test this with this Blocks demo:
```py
import gradio as gr
with gr.Blocks() as demo:
i = gr.Image(source="canvas", shape=(512, 512))
o = gr.Image()
i.change(lambda x:x, i, o)
demo.launch()
```
@abidlabs
No. Right now gradio does not support a simple 512\*512 drawing canvas.
pip install --upgrade gradio
then

Then the result will be like

As we can see, this has nothing to do with "shape=(512, 512)", the shape parameter does not control the resolution, it controls a "gradio-style dpi". You can even draw in this long rectangular:

When it is processed, the results provide strong evidence that "shape" is related to dpi and cropping, not shape of canvas

If this is an intentional design, it may be worthwhile to reopen this issue to target a more straightforward drawing board.
Hi @lllyasviel so to achieve what you want, I think we need to control both the image resolution (which is controlled via the `shape` parameter above) and the display size of the Image component, which is controlled via the `.style()` method of the `Image` component. I agree that it's a bit confusing, but I think this should achieve what you want:
```py
import gradio as gr
with gr.Blocks() as demo:
i = gr.Image(source="canvas", shape=(512, 512)).style(width=512, height=512)
o = gr.Image().style(width=512, height=512)
i.change(lambda x:x, i, o)
demo.launch()
```
However, when I tested this, I got the canvas being cut off at the halfway point, preventing me from drawing in the bottom half of the canvas, which is very strange.
<img width="457" alt="image" src="https://user-images.githubusercontent.com/1778297/218155557-8f766207-6bd1-4848-9344-23380dd41a51.png">
I'm going to reopen this issue so that we can fix this. cc @pngwn
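Editor's sketch, tying the discussion above to the `brush_radius` parameter added in this record's patch: a square drawing canvas with a preset stroke width, with the component's display size set via `.style()` as suggested. Parameter availability assumes a Gradio version that includes that patch.
```py
import gradio as gr

with gr.Blocks() as demo:
    # shape controls the processed resolution, .style() the on-screen size,
    # and brush_radius the initial stroke width of the sketch tool
    canvas = gr.Image(
        source="canvas", tool="color-sketch", shape=(512, 512), brush_radius=20
    ).style(width=512, height=512)
    out = gr.Image().style(width=512, height=512)
    canvas.change(lambda x: x, canvas, out)

demo.launch()
```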
The UI equally crashes if I do something like:
```py
def update_image():
return gr.update(value="image.png")
image = gr.Image(interactive=True, tool="color-sketch")
# ...
demo.load(update_image, inputs=[], outputs=image)
```
And given that the `source="canvas"` can only initialize with a white canvas, I'm currently failing to see any way to initialise a `color-sketch` with a black image to be drawn on top
Actually, uploading the image **does** work, though.
This is due to a resize loop, the canvas gets too big and eventually crashes. Surprised it isn't happening when uploading an image though.
| 2023-02-22T01:01:15 |
gradio-app/gradio | 3,315 | gradio-app__gradio-3315 | [
"3314"
] | e54042b43b4e4844a46632011a093ca02a5d603a | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -236,6 +236,10 @@ def set_event_trigger(
"batch": batch,
"max_batch_size": max_batch_size,
"cancels": cancels or [],
+ "types": {
+ "continuous": bool(every),
+ "generator": inspect.isgeneratorfunction(fn) or bool(every),
+ },
}
Context.root_block.dependencies.append(dependency)
return dependency
@@ -579,6 +583,8 @@ def iterate_over_children(children_list):
with block:
iterate_over_children(children)
+ derived_fields = ["types"]
+
with Blocks(theme=config["theme"], css=config["theme"]) as blocks:
# ID 0 should be the root Blocks component
original_mapping[0] = Context.root_block or blocks
@@ -596,6 +602,8 @@ def iterate_over_children(children_list):
# older demos
if dependency["trigger"] == "fake_event":
continue
+ for field in derived_fields:
+ dependency.pop(field, None)
targets = dependency.pop("targets")
trigger = dependency.pop("trigger")
dependency.pop("backend_fn")
| diff --git a/gradio/test_data/blocks_configs.py b/gradio/test_data/blocks_configs.py
--- a/gradio/test_data/blocks_configs.py
+++ b/gradio/test_data/blocks_configs.py
@@ -196,6 +196,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
{
"targets": [39],
@@ -212,6 +213,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
{
"targets": [],
@@ -228,6 +230,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
],
}
@@ -431,6 +434,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
{
"targets": [933],
@@ -447,6 +451,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
{
"targets": [],
@@ -463,6 +468,7 @@
"max_batch_size": 4,
"cancels": [],
"every": None,
+ "types": {"continuous": False, "generator": False},
},
],
}
diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -325,6 +325,37 @@ async def test_restart_after_close(self):
completed = True
assert msg["output"]["data"][0] == "Victor"
+ def test_function_types_documented_in_config(self):
+ def continuous_fn():
+ return 42
+
+ def generator_function():
+ for index in range(10):
+ yield index
+
+ with gr.Blocks() as demo:
+
+ gr.Number(value=lambda: 2, every=2)
+ meaning_of_life = gr.Number()
+ counter = gr.Number()
+ generator_btn = gr.Button(value="Generate")
+ greeting = gr.Textbox()
+ greet_btn = gr.Button(value="Greet")
+
+ greet_btn.click(lambda: "Hello!", inputs=None, outputs=[greeting])
+ generator_btn.click(generator_function, inputs=None, outputs=[counter])
+ demo.load(continuous_fn, inputs=None, outputs=[meaning_of_life], every=1)
+
+ for i, dependency in enumerate(demo.config["dependencies"]):
+ if i == 0:
+ assert dependency["types"] == {"continuous": True, "generator": True}
+ if i == 1:
+ assert dependency["types"] == {"continuous": False, "generator": False}
+ if i == 2:
+ assert dependency["types"] == {"continuous": False, "generator": True}
+ if i == 3:
+ assert dependency["types"] == {"continuous": True, "generator": True}
+
@pytest.mark.asyncio
async def test_run_without_launching(self):
"""Test that we can start the app and use queue without calling .launch().
| Document in the config if the block function is a generator and if it runs forever
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
Needed for several clients, as discussed in this thread: https://huggingface.slack.com/archives/C02SPHC1KD1/p1677190325992569
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Additional context**
Add any other context or screenshots about the feature request here.
| We actually already track `every`, I think; it is just other generating functions. Maybe we can combine them in the config. Although it would be good to know if they run forever or have a finite number of runs, if we have that info. It might also be good to know if a prediction is the last one, although I don't think that is essential.
> Although it would be good to know if they run forever or have a finite number of runs, if we have that info.
Yeah, I was thinking of adding two fields to the dependency config:
1. `continuous`: True if it runs forever.
2. `generator`: True if it is a generating function.
1 implies 2 but not the other way around.
Would we remove `every` in this case? I think those names are better. Or we could add a `types` field with the function type(s). Either a list or an 'object'. Some degree of namespacing might be nice.
I guess we can't remove anything as that might be breaking.
I like the suggestion to use a `types` field! Let's not remove `every` for the reason you mentioned, and I think it's good to know the expected frequency anyway. | 2023-02-24T17:42:21 |
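Editor's sketch of how the `types` entry added by this patch surfaces at runtime (mirrors the added test): each dependency in the app config now reports whether its function is a generator and whether it runs continuously via `every`.
```python
import gradio as gr

with gr.Blocks() as demo:
    greeting = gr.Textbox()
    btn = gr.Button("Greet")
    btn.click(lambda: "Hello!", inputs=None, outputs=[greeting])

# With this patch applied, the click dependency documents its function type:
print(demo.config["dependencies"][0]["types"])
# -> {'continuous': False, 'generator': False}
```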
gradio-app/gradio | 3,338 | gradio-app__gradio-3338 | [
"3319"
] | 9fbe7a06fbd1ff9f49318cb4d16b8e827420f206 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1259,7 +1259,8 @@ def __init__(
value=value,
**kwargs,
)
- self.cleared_value = self.value
+
+ self.cleared_value = self.value or ([] if multiselect else "")
def get_config(self):
return {
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -886,11 +886,11 @@ def tex2svg(formula, *args):
svg_start = xml_code.index("<svg ")
svg_code = xml_code[svg_start:]
svg_code = re.sub(r"<metadata>.*<\/metadata>", "", svg_code, flags=re.DOTALL)
- svg_code = re.sub(r' width="[^"]+"', '', svg_code)
+ svg_code = re.sub(r' width="[^"]+"', "", svg_code)
height_match = re.search(r'height="([\d.]+)pt"', svg_code)
if height_match:
height = float(height_match.group(1))
- new_height = height / FONTSIZE # conversion from pt to em
+ new_height = height / FONTSIZE # conversion from pt to em
svg_code = re.sub(r'height="[\d.]+pt"', f'height="{new_height}em"', svg_code)
copy_code = f"<span style='font-size: 0px'>{formula}</span>"
return f"{copy_code}{svg_code}"
| 3.16.0 introduced a breaking change for Dropdown
### Describe the bug
The addition of the multiselect option to `Dropdown` introduced a breaking change for the Dropdown component's output signature. The default case when there is no value (for example, an optional dropdown that a user does not interact with before submitting) is now an array (a list on the Python side) instead of an empty string. This means checks like `dropdown == ""` etc. will now be False rather than True.
I think it is fine if users select an option and _then_ select nothing.
This is the offending line: https://github.com/gradio-app/gradio/pull/2871/files#diff-f0b03055e354ad5f6f0c4c006adba9a9fe60a07ca1790b75e20cd0b69737ede8R11
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Above.
### Screenshot
-
### Logs
```shell
-
```
### System Info
```shell
-
```
### Severity
blocking upgrade to latest gradio version
| Wouldn't it make sense to have the default be `""` if `multiselect=False` and be `[]` if `multiselect=True`?
Yeah, that's what it should be, and it brings it back to the previous behaviour.
We can consider aligning the types in 4.0. | 2023-02-27T17:16:12 |
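Editor's addendum to the Dropdown discussion above: until the types are aligned, a version-robust way to treat "no selection" in a handler is plain Python truthiness, which covers `""`, `[]` and `None` alike (a sketch, not part of the original thread).
```python
import gradio as gr

def handle(choice):
    if not choice:  # matches "", [] and None, regardless of multiselect
        return "nothing selected"
    return f"you picked: {choice}"

with gr.Blocks() as demo:
    dd = gr.Dropdown(choices=["a", "b", "c"], label="Pick one")
    out = gr.Textbox()
    dd.change(handle, dd, out)
```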
|
gradio-app/gradio | 3,342 | gradio-app__gradio-3342 | [
"3282"
] | f86b6445f80f70334a7555cba9dd9162dcaf194b | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -4,7 +4,6 @@
import inspect
import json
import os
-import pkgutil
import random
import secrets
import sys
@@ -30,6 +29,7 @@
from gradio.themes import ThemeClass as Theme
from gradio.tunneling import CURRENT_TUNNELS
from gradio.utils import (
+ GRADIO_VERSION,
TupleNoPrint,
check_function_inputs_match,
component_or_layout_class,
@@ -40,7 +40,6 @@
set_documentation_group("blocks")
-
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
import comet_ml
from fastapi.applications import FastAPI
@@ -485,7 +484,8 @@ def __init__(
if analytics_enabled is not None
else os.getenv("GRADIO_ANALYTICS_ENABLED", "True") == "True"
)
-
+ if not self.analytics_enabled:
+ os.environ["HF_HUB_DISABLE_TELEMETRY"] = "True"
super().__init__(render=False, **kwargs)
self.blocks: Dict[int, Block] = {}
self.fns: List[BlockFunction] = []
@@ -522,9 +522,8 @@ def __init__(
data = {
"mode": self.mode,
"custom_css": self.css is not None,
- "version": (pkgutil.get_data(__name__, "version.txt") or b"")
- .decode("ascii")
- .strip(),
+ "theme": self.theme,
+ "version": GRADIO_VERSION,
}
utils.initiated_analytics(data)
@@ -1579,6 +1578,7 @@ def reverse(text):
"mode": self.mode,
}
utils.launch_analytics(data)
+ utils.launched_telemetry(self, data)
utils.show_tip(self)
diff --git a/gradio/interface.py b/gradio/interface.py
--- a/gradio/interface.py
+++ b/gradio/interface.py
@@ -8,7 +8,6 @@
import inspect
import json
import os
-import pkgutil
import re
import warnings
import weakref
@@ -31,6 +30,7 @@
from gradio.layouts import Column, Row, Tab, Tabs
from gradio.pipelines import load_from_pipeline
from gradio.themes import ThemeClass as Theme
+from gradio.utils import GRADIO_VERSION
set_documentation_group("interface")
@@ -316,13 +316,8 @@ def clean_html(raw_html):
self.simple_server = None
- # For analytics_enabled and allow_flagging: (1) first check for
- # parameter, (2) check for env variable, (3) default to True/"manual"
- self.analytics_enabled = (
- analytics_enabled
- if analytics_enabled is not None
- else os.getenv("GRADIO_ANALYTICS_ENABLED", "True") == "True"
- )
+ # For allow_flagging: (1) first check for parameter,
+ # (2) check for env variable, (3) default to True/"manual"
if allow_flagging is None:
allow_flagging = os.getenv("GRADIO_ALLOW_FLAGGING", "manual")
if allow_flagging is True:
@@ -388,9 +383,8 @@ def clean_html(raw_html):
"interpretation": interpretation,
"allow_flagging": allow_flagging,
"custom_css": self.css is not None,
- "version": (pkgutil.get_data(__name__, "version.txt") or b"")
- .decode("ascii")
- .strip(),
+ "theme": self.theme,
+ "version": GRADIO_VERSION,
}
utils.initiated_analytics(data)
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -41,6 +41,7 @@
import httpx
import matplotlib.pyplot as plt
import requests
+from huggingface_hub.utils import send_telemetry
from markdown_it import MarkdownIt
from mdit_py_plugins.dollarmath.index import dollarmath_plugin
from mdit_py_plugins.footnote.index import footnote_plugin
@@ -57,6 +58,9 @@
analytics_url = "https://api.gradio.app/"
PKG_VERSION_URL = "https://api.gradio.app/pkg-version"
JSON_PATH = os.path.join(os.path.dirname(gradio.__file__), "launches.json")
+GRADIO_VERSION = (
+ (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
+)
T = TypeVar("T")
@@ -113,7 +117,19 @@ def initiated_analytics_thread(data: Dict[str, Any]) -> None:
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
pass # do not push analytics if no network
+ def initiated_telemetry_thread(data: Dict[str, Any]) -> None:
+ try:
+ send_telemetry(
+ topic="gradio/initiated",
+ library_name="gradio",
+ library_version=GRADIO_VERSION,
+ user_agent=data,
+ )
+ except Exception:
+ pass
+
threading.Thread(target=initiated_analytics_thread, args=(data,)).start()
+ threading.Thread(target=initiated_telemetry_thread, args=(data,)).start()
def launch_analytics(data: Dict[str, Any]) -> None:
@@ -130,6 +146,65 @@ def launch_analytics_thread(data: Dict[str, Any]) -> None:
threading.Thread(target=launch_analytics_thread, args=(data,)).start()
+def launched_telemetry(blocks: gradio.Blocks, data: Dict[str, Any]) -> None:
+ blocks_telemetry, inputs_telemetry, outputs_telemetry, targets_telemetry = (
+ [],
+ [],
+ [],
+ [],
+ )
+
+ for x in list(blocks.blocks.values()):
+ blocks_telemetry.append(x.get_block_name()) if isinstance(
+ x, BlockContext
+ ) else blocks_telemetry.append(str(x))
+
+ for x in blocks.dependencies:
+ targets_telemetry = targets_telemetry + [
+ str(blocks.blocks[y]) for y in x["targets"]
+ ]
+ inputs_telemetry = inputs_telemetry + [
+ str(blocks.blocks[y]) for y in x["inputs"]
+ ]
+ outputs_telemetry = outputs_telemetry + [
+ str(blocks.blocks[y]) for y in x["outputs"]
+ ]
+ additional_data = {
+ "is_kaggle": blocks.is_kaggle,
+ "is_sagemaker": blocks.is_sagemaker,
+ "using_auth": blocks.auth is not None,
+ "dev_mode": blocks.dev_mode,
+ "show_api": blocks.show_api,
+ "show_error": blocks.show_error,
+ "theme": blocks.theme,
+ "title": blocks.title,
+ "inputs": blocks.input_components
+ if blocks.mode == "interface"
+ else inputs_telemetry,
+ "outputs": blocks.output_components
+ if blocks.mode == "interface"
+ else outputs_telemetry,
+ "targets": targets_telemetry,
+ "blocks": blocks_telemetry,
+ "events": [str(x["trigger"]) for x in blocks.dependencies],
+ }
+
+ data.update(additional_data)
+
+ def launched_telemtry_thread(data: Dict[str, Any]) -> None:
+ try:
+ send_telemetry(
+ topic="gradio/launched",
+ library_name="gradio",
+ library_version=GRADIO_VERSION,
+ user_agent=data,
+ )
+ except Exception as e:
+ print("Error while sending telemetry: {}".format(e))
+
+ threading.Thread(target=launched_telemtry_thread, args=(data,)).start()
+
+
def integration_analytics(data: Dict[str, Any]) -> None:
data.update({"ip_address": get_local_ip_address()})
@@ -141,7 +216,19 @@ def integration_analytics_thread(data: Dict[str, Any]) -> None:
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
pass # do not push analytics if no network
+ def integration_telemetry_thread(data: Dict[str, Any]) -> None:
+ try:
+ send_telemetry(
+ topic="gradio/integration",
+ library_name="gradio",
+ library_version=GRADIO_VERSION,
+ user_agent=data,
+ )
+ except Exception as e:
+ print("Error while sending telemetry: {}".format(e))
+
threading.Thread(target=integration_analytics_thread, args=(data,)).start()
+ threading.Thread(target=integration_telemetry_thread, args=(data,)).start()
def error_analytics(message: str) -> None:
@@ -160,7 +247,19 @@ def error_analytics_thread(data: Dict[str, Any]) -> None:
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
pass # do not push analytics if no network
+ def error_telemetry_thread(data: Dict[str, Any]) -> None:
+ try:
+ send_telemetry(
+ topic="gradio/error",
+ library_name="gradio",
+ library_version=GRADIO_VERSION,
+ user_agent=message,
+ )
+ except Exception as e:
+ print("Error while sending telemetry: {}".format(e))
+
threading.Thread(target=error_analytics_thread, args=(data,)).start()
+ threading.Thread(target=error_telemetry_thread, args=(data,)).start()
async def log_feature_analytics(feature: str) -> None:
| diff --git a/test/requirements.in b/test/requirements.in
--- a/test/requirements.in
+++ b/test/requirements.in
@@ -11,7 +11,7 @@ scikit-image
shap
pytest
wandb
-huggingface_hub
+huggingface_hub>=0.13.0
pytest-cov
pytest-asyncio
black
diff --git a/test/requirements.txt b/test/requirements.txt
--- a/test/requirements.txt
+++ b/test/requirements.txt
@@ -1,5 +1,5 @@
#
-# This file is autogenerated by pip-compile with Python 3.10
+# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
#
# pip-compile --output-file=requirements.txt
@@ -24,8 +24,12 @@ backcall==0.2.0
# via ipython
black==22.6.0
# via -r requirements.in
-boto3==1.26.65
+boto3==1.26.65
# via -r requirements.in
+botocore==1.29.87
+ # via
+ # boto3
+ # s3transfer
certifi==2022.6.15
# via
# dulwich
@@ -98,7 +102,7 @@ httpx==0.23.0
# via
# -r requirements.in
# respx
-huggingface-hub==0.8.1
+huggingface-hub==0.13.0
# via
# -r requirements.in
# transformers
@@ -110,7 +114,9 @@ idna==3.3
imageio==2.19.5
# via scikit-image
importlib-metadata==4.2.0
- # via mlflow
+ # via
+ # flask
+ # mlflow
iniconfig==1.1.1
# via pytest
ipython==7.34.0
@@ -125,6 +131,10 @@ jinja2==3.1.2
# via
# altair
# flask
+jmespath==1.0.1
+ # via
+ # boto3
+ # botocore
joblib==1.1.0
# via scikit-learn
jsonschema==4.7.2
@@ -245,7 +255,9 @@ pytest-asyncio==0.19.0
pytest-cov==3.0.0
# via -r requirements.in
python-dateutil==2.8.2
- # via pandas
+ # via
+ # botocore
+ # pandas
pytz==2022.1
# via
# mlflow
@@ -278,6 +290,8 @@ respx==0.19.2
# via -r requirements.in
rfc3986[idna2008]==1.5.0
# via httpx
+s3transfer==0.6.0
+ # via boto3
scikit-image==0.19.3
# via -r requirements.in
scikit-learn==1.0.2
@@ -358,11 +372,14 @@ transformers==4.20.1
# via -r requirements.in
typing-extensions==4.3.0
# via
+ # black
# huggingface-hub
# pydantic
+ # starlette
# torch
urllib3==1.26.10
# via
+ # botocore
# dulwich
# requests
# sentry-sdk
| gr.update on multiple output components errors when output created using list multiplication but succeeds with list comprehension
### Describe the bug
`[gr.update(visible=value) for _ in range(2)]` works.
`[gr.update(visible=value)]*2` gives error below.
Toggle the checkbox to trigger.
I printed in the function body to check differences in the 2 variants. They look the same.
```
True
variant1: [{'visible': False, '__type__': 'generic_update'}, {'visible': False, '__type__': 'generic_update'}]
variant2: [{'visible': False, '__type__': 'generic_update'}, {'visible': False, '__type__': 'generic_update'}]
```
I tried debugging the python file and see that the failure variant can stop on my breakpoint at `gradio/components.py", line 1534, in postprocess` but the successful variant does not stop at the breakpoint?
Why is the successful variant working? And why is it not stopping at my breakpoint like the failure variant does? Are they going down different code paths?
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```
import gradio as gr
num_images = 2
with gr.Blocks() as demo:
im_list = [gr.Image() for i in range(num_images)]
def change_visibility(value):
variant1 = [gr.update(visible=value) for _ in range(2)]
variant2 = [gr.update(visible=value)]*2
print(variant1 == variant2)
print('variant1:', variant1)
print('variant2:', variant2)
# return variant1
return variant2
checkbox = gr.Checkbox(value=True, label='Show image1')
checkbox.change(change_visibility, inputs=checkbox,outputs=im_list)
if __name__ == "__main__":
demo.launch()
```
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/routes.py", line 384, in run_predict
output = await app.get_blocks().process_api(
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/blocks.py", line 1027, in process_api
data = self.postprocess_data(fn_index, result["prediction"], state)
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/blocks.py", line 964, in postprocess_data
prediction_value = block.postprocess(prediction_value)
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/components.py", line 1534, in postprocess
raise ValueError("Cannot process this value as an Image")
ValueError: Cannot process this value as an Image
```
```
### System Info
```shell
Python 3.8.12 (default, Dec 8 2021, 11:22:00)
[Clang 12.0.5 (clang-1205.0.22.11)]
Gradio version 3.19.1
MacOS Big Sur version 11.3
```
### Severity
annoying
| So strange, I can confirm the issue but have no idea what could be causing this! We'll look into it, thanks for reporting @gitgithan!
Thanks for filing @gitgithan !
The [postprocessing](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L360) of update dictionaries involves deleting keys from the dictionary, which is why shallow copies will not work as expected.
Maybe this can be rewritten to not modify the dictionaries in place.
Ah nice catch @freddyaboulton! That should fix it. @gitgithan would you be interested in making a PR?
@abidlabs Yes I'm interested in looking at how to fix this, it has caused me immeasurable pain today.
Could I get some guidance on how to trace the execution so I understand what's going on?
I assume I'll need to know the code paths for both variants to compare differences, and to ensure relevant files are considered/edited.
@freddyaboulton 's comment and link seem to have dived straight into the source of the issue, but I only have 1 day of experience with Gradio so far, so it's over my head
Thanks for wanting to take a look at this @gitgithan !
The logic for processing the user defined functions in the backend is pretty much handled entirely by [process_api](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L969).
There are three parts:
1. Preprocessing the data from the front-end
2. Calling the user defined function with this data
3. post processing the data returned from the function
If you want to wrap your head around this, I would set a `breakpoint()` in `process_api` and then launch a simple demo and step through it.
I think this particular problem is caused in this [part](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L955) of the postprocessing, where we apply special processing for update dictionaries.
I think just modifying that function to return a new copy of the update dict with the keys removed, as opposed to removing the keys in-place should fix the issue. | 2023-02-27T20:37:46 |
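Editor's illustration of the in-place-mutation pitfall described above (plain Python, not Gradio internals): every entry of `[gr.update(...)] * 2` is the *same* dict, so deleting a key while handling the first entry breaks the second. Working on a copy, as suggested, avoids it.
```python
update = {"visible": False, "__type__": "generic_update"}
outputs = [update] * 2          # both entries alias one dict

def postprocess_in_place(d):
    del d["__type__"]           # second call fails: the key is already gone
    return d

def postprocess_copy(d):
    d = d.copy()                # the suggested fix: mutate a private copy
    d.pop("__type__", None)
    return d

print([postprocess_copy(o) for o in outputs])  # safe for shared dicts
```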
gradio-app/gradio | 3,345 | gradio-app__gradio-3345 | [
"3311"
] | ba6e4accb6a129eddad66feed9f0b902061955b4 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -1241,7 +1241,9 @@ def __init__(
if isinstance(value, str):
value = [value]
if not multiselect and max_choices is not None:
- warnings.warn("The `max_choices` parameter is ignored when `multiselect` is False.")
+ warnings.warn(
+ "The `max_choices` parameter is ignored when `multiselect` is False."
+ )
self.max_choices = max_choices
self.test_input = self.choices[0] if len(self.choices) else None
self.interpret_by_tokens = False
@@ -3831,6 +3833,7 @@ def style(
grid: int | Tuple | None = None,
height: str | None = None,
container: bool | None = None,
+ preview: bool | None = None,
**kwargs,
):
"""
@@ -3844,6 +3847,8 @@ def style(
self._style["grid"] = grid
if height is not None:
self._style["height"] = height
+ if preview is not None:
+ self._style["preview"] = preview
return Component.style(self, container=container, **kwargs)
| Have option to display gallery in preview state
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
As a user, it would be great to have the option to display the gallery in a preview state (what it looks like when an image is clicked):
<img width="667" alt="image" src="https://user-images.githubusercontent.com/41651716/221232958-0910785d-26ad-4518-8182-67245c9a3280.png">
**Describe the solution you'd like**
Add `preview=True|False` to the `style` method. I guess default to `False` to not change behavior of apps that upgrade versions.
**Additional context**
cc @sashavor
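Editor's sketch of the API added by the patch above: the preview layout is opted into through `.style(preview=True)` (assumes a Gradio version that includes this patch).
```py
import gradio as gr

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Generated images").style(grid=[2], preview=True)
```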
| 2023-02-28T00:18:55 |
||
gradio-app/gradio | 3,358 | gradio-app__gradio-3358 | [
"3303"
] | 6c9c41b15d1a8b3f29d61b6bf172b6fc3a918ab2 | diff --git a/demo/blocks_js_methods/run.py b/demo/blocks_js_methods/run.py
--- a/demo/blocks_js_methods/run.py
+++ b/demo/blocks_js_methods/run.py
@@ -10,19 +10,32 @@
with gr.Row():
btn = gr.Button("Create sentence.")
reverse_btn = gr.Button("Reverse sentence.")
- foo_bar_btn = gr.Button("Foo bar.")
-
+ foo_bar_btn = gr.Button("Append foo")
+ reverse_then_to_the_server_btn = gr.Button(
+ "Reverse sentence and send to server."
+ )
+
def sentence_maker(w1, w2, w3):
return f"{w1} {w2} {w3}"
output1 = gr.Textbox(label="output 1")
output2 = gr.Textbox(label="verb")
output3 = gr.Textbox(label="verb reversed")
+ output4 = gr.Textbox(label="front end process and then send to backend")
btn.click(sentence_maker, [subject, verb, object], output1)
- reverse_btn.click(None, [subject, verb, object], output2, _js="(s, v, o) => o + ' ' + v + ' ' + s")
+ reverse_btn.click(
+ None, [subject, verb, object], output2, _js="(s, v, o) => o + ' ' + v + ' ' + s"
+ )
verb.change(lambda x: x, verb, output3, _js="(x) => [...x].reverse().join('')")
foo_bar_btn.click(None, [], subject, _js="(x) => x + ' foo'")
+ reverse_then_to_the_server_btn.click(
+ sentence_maker,
+ [subject, verb, object],
+ output4,
+ _js="(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))",
+ )
+
if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
+ demo.launch()
| Custom js with multiple inputs and one output does not work as expected
### Describe the bug
When using a custom js function, the gradio UI will wrap this function and make sure the payload is a list to match the backend routing rules.
https://github.com/gradio-app/gradio/blob/f36211050cf75b587027cf2a79e88b8c7a3a05fd/ui/packages/app/src/Blocks.svelte#L60-L74
I guess the original intention of this function is to check the `d.inputs.length`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio
def inference(foo, bar):
return foo + bar
with gradio.Blocks() as main:
input1 = gradio.Textbox()
input2 = gradio.Textbox()
output = gradio.Text()
btn = gradio.Button("submit")
js = '(x, y) => { return [x, y]; }'
btn.click(inference, inputs=[input1, input2], outputs=output, _js=js)
main.launch()
```
### Screenshot
_No response_
### Logs
```shell
Running on local URL: http://127.0.0.1:7860
To create a public link, set `share=True` in `launch()`.
Traceback (most recent call last):
File "/home/foo/.conda/envs/sw37/lib/python3.7/site-packages/gradio/routes.py", line 327, in run_predict
iterators=iterators,
File "/home/foo/.conda/envs/sw37/lib/python3.7/site-packages/gradio/blocks.py", line 1013, in process_api
inputs = self.preprocess_data(fn_index, inputs, state)
File "/home/foo/.conda/envs/sw37/lib/python3.7/site-packages/gradio/blocks.py", line 923, in preprocess_data
processed_input.append(block.preprocess(inputs[i]))
IndexError: list index out of range
```
### System Info
```shell
gradio: 3.15.0
system: Arch Linux
browser: google-chrome 110.0.5481.177
```
### Severity
annoying
| 2023-03-02T07:40:42 |
||
gradio-app/gradio | 3,405 | gradio-app__gradio-3405 | [
"3282"
] | fddf37678450447bf2d73a63d1340cf90e083bbc | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -251,6 +251,7 @@ def update(**kwargs) -> Dict:
@classmethod
def get_specific_update(cls, generic_update: Dict[str, Any]) -> Dict:
+ generic_update = generic_update.copy()
del generic_update["__type__"]
specific_update = cls.update(**generic_update)
return specific_update
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -484,6 +484,22 @@ def test_blocks_does_not_replace_keyword_literal(self):
output = demo.postprocess_data(0, gr.update(value="NO_VALUE"), state={})
assert output[0]["value"] == "NO_VALUE"
+ def test_blocks_does_not_del_dict_keys_inplace(self):
+ with gr.Blocks() as demo:
+ im_list = [gr.Image() for i in range(2)]
+
+ def change_visibility(value):
+ return [gr.update(visible=value)] * 2
+
+ checkbox = gr.Checkbox(value=True, label="Show image")
+ checkbox.change(change_visibility, inputs=checkbox, outputs=im_list)
+
+ output = demo.postprocess_data(0, [gr.update(visible=False)] * 2, state={})
+ assert output == [
+ {"visible": False, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ ]
+
def test_blocks_returns_correct_output_dict_single_key(self):
with gr.Blocks() as demo:
num = gr.Number()
| gr.update on multiple output components errors when output created using list multiplication but succeeds with list comprehension
### Describe the bug
`[gr.update(visible=value) for _ in range(2)]` works.
`[gr.update(visible=value)]*2` gives error below.
Toggle the checkbox to trigger.
I printed in the function body to check differences in the 2 variants. They look the same.
```
True
variant1: [{'visible': False, '__type__': 'generic_update'}, {'visible': False, '__type__': 'generic_update'}]
variant2: [{'visible': False, '__type__': 'generic_update'}, {'visible': False, '__type__': 'generic_update'}]
```
I tried debugging the python file and see that the failure variant can stop on my breakpoint at `gradio/components.py", line 1534, in postprocess` but the successful variant does not stop at the breakpoint?
Why is the successful variant working? And why is it not stopping at my breakpoint like the failure variant does? Are they going down different code paths?
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```
import gradio as gr
num_images = 2
with gr.Blocks() as demo:
im_list = [gr.Image() for i in range(num_images)]
def change_visibility(value):
variant1 = [gr.update(visible=value) for _ in range(2)]
variant2 = [gr.update(visible=value)]*2
print(variant1 == variant2)
print('variant1:', variant1)
print('variant2:', variant2)
# return variant1
return variant2
checkbox = gr.Checkbox(value=True, label='Show image1')
checkbox.change(change_visibility, inputs=checkbox,outputs=im_list)
if __name__ == "__main__":
demo.launch()
```
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/routes.py", line 384, in run_predict
output = await app.get_blocks().process_api(
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/blocks.py", line 1027, in process_api
data = self.postprocess_data(fn_index, result["prediction"], state)
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/blocks.py", line 964, in postprocess_data
prediction_value = block.postprocess(prediction_value)
File "/Users/hanqi/code/ahrefs/venv/lib/python3.8/site-packages/gradio/components.py", line 1534, in postprocess
raise ValueError("Cannot process this value as an Image")
ValueError: Cannot process this value as an Image
```
```
### System Info
```shell
Python 3.8.12 (default, Dec 8 2021, 11:22:00)
[Clang 12.0.5 (clang-1205.0.22.11)]
Gradio version 3.19.1
MacOS Big Sur version 11.3
```
### Severity
annoying
| So strange, I can confirm the issue but have no idea what could be causing this! We'll look into it, thanks for reporting @gitgithan!
Thanks for filing @gitgithan !
The [postprocessing](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L360) of update dictionaries involves deleting keys from the dictionary, which is why shallow copies will not work as expected.
Maybe this can be rewritten to not modify the dictionaries in place.
Ah nice catch @freddyaboulton! That should fix it. @gitgithan would you be interested in making a PR?
@abidlabs Yes I'm interested in looking at how to fix this, it has caused me immeasurable pain today.
Could I get some guidance on how to trace the execution so I understand what's going on?
I assume I'll need to know the code paths for both variants to compare differences, and to ensure relevant files are considered/edited.
@freddyaboulton 's comment and link seem to have dived straight into the source of the issue, but I only have 1 day of experience with Gradio so far, so it's over my head
Thanks for wanting to take a look at this @gitgithan !
The logic for processing the user defined functions in the backend is pretty much handled entirely by [process_api](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L969).
There are three parts:
1. Preprocessing the data from the front-end
2. Calling the user defined function with this data
3. post processing the data returned from the function
If you want to wrap your head around this, I would set a `breakpoint()` in `process_api` and then launch a simple demo and step through it.
I think this particular problem is caused in this [part](https://github.com/gradio-app/gradio/blob/main/gradio/blocks.py#L955) of the postprocessing, where we apply special processing for update dictionaries.
I think just modifying that function to return a new copy of the update dict with the keys removed, as opposed to removing the keys in-place should fix the issue.
Hi @gitgithan , wondering if you're still interested in helping us fix this issue! | 2023-03-07T17:24:39 |
gradio-app/gradio | 3,434 | gradio-app__gradio-3434 | [
"3433"
] | c22f84343baaff1f215befa0e337f9abbe3e60f3 | diff --git a/gradio/documentation.py b/gradio/documentation.py
--- a/gradio/documentation.py
+++ b/gradio/documentation.py
@@ -198,15 +198,25 @@ def generate_documentation():
instance_attribute_fn = fn_name.startswith("*")
if instance_attribute_fn:
fn_name = fn_name[1:]
- fn = getattr(cls(), fn_name)
+ # Instance attribute fns are classes
+ # whose __call__ method determines their behavior
+ fn = getattr(cls(), fn_name).__call__
else:
fn = getattr(cls, fn_name)
- (
- description_doc,
- parameter_docs,
- return_docs,
- examples_doc,
- ) = document_fn(fn, cls)
+ if not callable(fn):
+ description_doc = str(fn)
+ parameter_docs = {}
+ return_docs = {}
+ examples_doc = ""
+ override_signature = f"gr.{cls}.{fn}"
+ else:
+ (
+ description_doc,
+ parameter_docs,
+ return_docs,
+ examples_doc,
+ ) = document_fn(fn, cls)
+ override_signature = None
if instance_attribute_fn:
description_doc = extract_instance_attr_doc(cls, fn_name)
cls_documentation["fns"].append(
@@ -218,6 +228,7 @@ def generate_documentation():
"parameters": parameter_docs,
"returns": return_docs,
"example": examples_doc,
+ "override_signature": override_signature
}
)
documentation[mode].append(cls_documentation)
| Event Handler Parameter Documentation not showing on main
### Describe the bug
On main: https://gradio.app/docs/main/#audio
<img width="1079" alt="image" src="https://user-images.githubusercontent.com/41651716/224377044-291f99be-ae46-46f3-80fc-3ac6e940a98e.png">
On latest:
<img width="1049" alt="image" src="https://user-images.githubusercontent.com/41651716/224377167-7a9a2fd5-1348-46b6-aeeb-e31fe62b8e48.png">
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
-
### Screenshot
_No response_
### Logs
```shell
-
```
### System Info
```shell
-
```
### Severity
annoying
| Hmmm @aliabid94
Think I figured it out | 2023-03-10T17:23:00 |
|
gradio-app/gradio | 3,452 | gradio-app__gradio-3452 | [
"2585"
] | 373c8dd71635b57409a6b78741fc5a77090a5ca8 | diff --git a/demo/blocks_essay/run.py b/demo/blocks_essay/run.py
--- a/demo/blocks_essay/run.py
+++ b/demo/blocks_essay/run.py
@@ -14,7 +14,7 @@ def change_textbox(choice):
radio = gr.Radio(
["short", "long", "none"], label="What kind of essay would you like to write?"
)
- text = gr.Textbox(lines=2, interactive=True)
+ text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)
radio.change(fn=change_textbox, inputs=radio, outputs=text)
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -444,6 +444,24 @@ def get_interpretation_scores(
result.append((self.interpretation_separator, 0))
return result
+ def style(
+ self,
+ *,
+ show_copy_button: bool | None = None,
+ container: bool | None = None,
+ **kwargs,
+ ):
+ """
+ This method can be used to change the appearance of the Textbox component.
+ Parameters:
+ show_copy_button: If True, includes a copy button to copy the text in the textbox. Only applies if show_label is True.
+ container: If True, will place the component in a container - providing some extra padding around the border.
+ """
+ if show_copy_button is not None:
+ self._style["show_copy_button"] = show_copy_button
+
+ return Component.style(self, container=container, **kwargs)
+
@document("style")
class Number(
@@ -3037,9 +3055,6 @@ def __init__(
self.stateful = True
IOComponent.__init__(self, value=deepcopy(value), **kwargs)
- def style(self):
- return self
-
class Variable(State):
"""Variable was renamed to State. This class is kept for backwards compatibility."""
| Add copy to clipboard button to other input types
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
The only input type that shows the copy to clipboard button is JSON.
**Describe the solution you'd like**
It would be great if users could optionally show this button for other input types such as gr.Textbox as well.
<img src="https://user-images.githubusercontent.com/6425112/199174802-25e484c6-ddeb-446d-b1de-07f6842758c8.png" width="300" />
| Thanks for the suggestion @armancohan. I do think, though, that this button is not too crucial for the other components, since it is easy enough to select the contents of a Textbox or right-click to copy an image, and the presence of such a button would clutter the UI
Hi,
I also would like to have this feature, as the content of the Textbox may be very long.
👍 ok perhaps it's worth adding the `Textbox` component, as well as the upcoming `Code` component, WDYT @pngwn?
I'm looking for this feature as well, really useful for text outputs.
We could add it as an option to the components it makes sense for but might not be a great default?
Sounds good. Fwiw since I originally commented on this issue, I have changed my mind -- I do think it would be quite useful for demos where someone is generating lots of text (e.g. speech-to-text demos)
Might be useful as part of a UI toolkit used for building custom components too. This is a pretty simple issue, we just need to figure out what components to add the feature to + the api.
Seconding.
Also looking forward to copy-to-clipboard on a textbox. | 2023-03-13T20:51:33 |
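Editor's sketch of the feature as merged in the patch above (mirrors the `demo/blocks_essay` change): a copy button on a Textbox enabled through `.style(show_copy_button=True)`; per the docstring it only applies when the label is shown.
```python
import gradio as gr

with gr.Blocks() as demo:
    text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)
```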
|
gradio-app/gradio | 3,482 | gradio-app__gradio-3482 | [
"3472"
] | 6ffa7f105e1069f19e6f9e8748763755756cf1ca | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -71,6 +71,7 @@ def __init__(
self._skip_init_processing = _skip_init_processing
self._style = {}
self.parent: BlockContext | None = None
+ self.root = ""
if render:
self.render()
@@ -1102,6 +1103,7 @@ def get_config_file(self):
"show_error": getattr(self, "show_error", False),
"show_api": self.show_api,
"is_colab": utils.colab_check(),
+ "root": self.root
}
def getLayout(block):
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -755,6 +755,7 @@ def read_main():
# Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.
"""
blocks.dev_mode = False
+ blocks.root = path[:-1] if path.endswith("/") else path
blocks.config = blocks.get_config_file()
gradio_app = App.create_app(blocks)
| UI not displaying correctly because of path to `theme.css`
### Describe the bug
The UI displays correctly on the local system. But when I upload it to the server, the UI CSS is not present.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
NA
### Screenshot
<img width="1440" alt="Screenshot 2023-03-15 at 12 23 45 AM" src="https://user-images.githubusercontent.com/71621292/225127688-2ce7189c-70f5-44c4-bce7-ee96040967a6.png">
### Logs
```shell
No logs. Only a 404 error.
```
### System Info
```shell
This is hosted on railway.app.
```
### Severity
annoying
| Here's the URL:
https://fastapi-production-72d5.up.railway.app/answer-ui/
I'm not super familiar with Railway but it seems like the problem is that Gradio needs access to `/theme.css` to load the CSS, and either this file is missing from the Railway app, or Railway is blocking this file
I tried loading the thing on a different account and it seems to be working. But it's only this account for some reason. Do you know how I can get this sorted?
> I'm not super familiar with Railway but it seems like the problem is that Gradio needs access to `/theme.css` to load the CSS, and either this file is missing from the Railway app, or Railway is blocking this file
Is there any way I can index the theme directly in the code?
> I tried loading the thing on a different account and it seems to be working. But it's only this account for some reason. Do you know how I can get this sorted?
It works on a different Railway app? Then it doesn't sound like a Gradio issue, it sounds like an issue with Railway, no?
> Is there any way I can index the theme directly in the code?
I don't think so. cc @aliabid94 for your thoughts
Yes, you're right. It's probably an issue with them. But is there any CDN that I can use to index the theme.css file?
@aliabid94
No, the theme.css file is created specifically for your app and it might differ from app to app, so it wouldn't be available on a CDN. I'm going to close this issue as it doesn't seem related to Gradio, but rather Railway. If you could reach out to them and see how to serve a static css file, that should solve the issue you're seeing.
It looks like a gradio bug.
I ran into the same problem while trying the following case:
```python
app = gr.mount_gradio_app(app, io, path="/gradio")
```
This is the documented example of mounting Gradio inside a FastAPI app:
https://gradio.app/docs/#mount_gradio_app
For some reason, the front end assumes that theme.css is directly under root and tries to load it, but in fact `/gradio/theme.css` is served.
Ah, thanks @hinaloe for the heads up. cc @aliabid94 we should resolve this before releasing themes | 2023-03-16T17:23:15 |
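Editor's sketch of the mounting setup under discussion (the same shape as the `mount_gradio_app` docstring quoted in the patch above): after this patch the mounted Blocks records its sub-path, so assets such as `theme.css` resolve under `/gradio/` instead of the server root.
```python
from fastapi import FastAPI
import gradio as gr

app = FastAPI()
io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
app = gr.mount_gradio_app(app, io, path="/gradio")
# run with `uvicorn run:app` and open http://localhost:8000/gradio
```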
|
gradio-app/gradio | 3,573 | gradio-app__gradio-3573 | [
"3467"
] | cecd5a2526a7328cf900d297f23bdea739e6735e | diff --git a/demo/code/run.py b/demo/code/run.py
--- a/demo/code/run.py
+++ b/demo/code/run.py
@@ -13,7 +13,7 @@ def set_lang(language):
def set_lang_from_path():
sleep(1)
- return gr.Code.update((css_file, ), language="css")
+ return gr.Code.update((css_file,), language="css")
def code(language, code):
@@ -25,7 +25,11 @@ def code(language, code):
with gr.Blocks() as demo:
lang = gr.Dropdown(value="python", choices=gr.Code.languages)
with gr.Row():
- code_in = gr.Code(language="python", label="Input")
+ code_in = gr.Code(
+ language="python",
+ label="Input",
+ value='def all_odd_elements(sequence):\n """Returns every odd element of the sequence."""',
+ )
code_out = gr.Code(label="Ouput")
btn = gr.Button("Run")
btn_two = gr.Button("Load File")
| diff --git a/ui/packages/app/test/blocks_inputs.spec.ts b/ui/packages/app/test/blocks_inputs.spec.ts
--- a/ui/packages/app/test/blocks_inputs.spec.ts
+++ b/ui/packages/app/test/blocks_inputs.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -28,7 +29,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("renders the correct elements", async ({ page }) => {
await mock_demo(page, "blocks_inputs");
await mock_api(page, [["hi dawood"]]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const textboxes = await page.getByLabel("Input");
diff --git a/ui/packages/app/test/blocks_kinematics.spec.ts b/ui/packages/app/test/blocks_kinematics.spec.ts
--- a/ui/packages/app/test/blocks_kinematics.spec.ts
+++ b/ui/packages/app/test/blocks_kinematics.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -28,7 +29,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("renders the correct elements", async ({ page }) => {
await mock_demo(page, "blocks_kinematics");
await mock_api(page, [[25, 45]]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
await Promise.all([
page.click("button:has-text('Run')"),
diff --git a/ui/packages/app/test/blocks_page_load.spec.ts b/ui/packages/app/test/blocks_page_load.spec.ts
--- a/ui/packages/app/test/blocks_page_load.spec.ts
+++ b/ui/packages/app/test/blocks_page_load.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -28,7 +29,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("renders the correct elements", async ({ page }) => {
await mock_demo(page, "blocks_page_load");
await mock_api(page, [["Welcome! This page has loaded for Frank"]]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const textbox = await page.getByLabel("Name");
diff --git a/ui/packages/app/test/blocks_xray.spec.ts b/ui/packages/app/test/blocks_xray.spec.ts
--- a/ui/packages/app/test/blocks_xray.spec.ts
+++ b/ui/packages/app/test/blocks_xray.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -27,7 +28,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("renders the correct elements", async ({ page }) => {
await mock_demo(page, "blocks_xray");
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const description = await page.getByTestId("markdown");
await expect(description).toContainText("Detect Disease From Scan");
@@ -56,7 +58,8 @@ test("can run an api request and display the data", async ({ page }) => {
]
]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
await page.getByLabel("Covid").check();
await page.getByLabel("Lung Cancer").check();
diff --git a/ui/packages/app/test/input_output.spec.ts b/ui/packages/app/test/input_output.spec.ts
--- a/ui/packages/app/test/input_output.spec.ts
+++ b/ui/packages/app/test/input_output.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -28,7 +29,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("a component acts as both input and output", async ({ page }) => {
await mock_demo(page, "input_output");
await mock_api(page, [["tset"]]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const textbox = await page.getByLabel("Input-Output");
diff --git a/ui/packages/app/test/kitchen_sink.spec.ts b/ui/packages/app/test/kitchen_sink.spec.ts
--- a/ui/packages/app/test/kitchen_sink.spec.ts
+++ b/ui/packages/app/test/kitchen_sink.spec.ts
@@ -1,5 +1,6 @@
import { test, expect, Page } from "@playwright/test";
import { BASE64_IMAGE, BASE64_AUDIO } from "./media_data";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -28,7 +29,8 @@ function mock_api(page: Page, body: Array<unknown>) {
test("test inputs", async ({ page }) => {
await mock_demo(page, "kitchen_sink");
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const textbox = await page.getByLabel("Textbox").nth(0);
await expect(textbox).toHaveValue("Lorem ipsum");
@@ -209,7 +211,8 @@ test("test outputs", async ({ page }) => {
]
]);
- await page.goto("http://localhost:9876");
+ await mock_theme(page);
+ await wait_for_page(page);
const submit_button = await page.locator("button", { hasText: /Submit/ });
diff --git a/ui/packages/app/test/outbreak_forecast.spec.ts b/ui/packages/app/test/outbreak_forecast.spec.ts
--- a/ui/packages/app/test/outbreak_forecast.spec.ts
+++ b/ui/packages/app/test/outbreak_forecast.spec.ts
@@ -1,5 +1,6 @@
import { test, expect, Page } from "@playwright/test";
import { BASE64_PLOT_IMG } from "./media_data";
+import { mock_theme, wait_for_page } from "./utils";
function mock_demo(page: Page, demo: string) {
return page.route("**/config", (route) => {
@@ -12,17 +13,6 @@ function mock_demo(page: Page, demo: string) {
});
}
-function mock_theme(page: Page) {
- return page.route("**/theme.css", (route) => {
- return route.fulfill({
- headers: {
- "Access-Control-Allow-Origin": "*"
- },
- path: `./test/mocks/theme.css`
- });
- });
-}
-
function mock_api(page: Page, body: Array<unknown>) {
return page.route("**/run/predict", (route) => {
const id = JSON.parse(route.request().postData()!).fn_index;
@@ -41,7 +31,7 @@ test("matplotlib", async ({ page }) => {
await mock_demo(page, "outbreak_forecast");
await mock_api(page, [[{ type: "matplotlib", plot: BASE64_PLOT_IMG }]]);
await mock_theme(page);
- await page.goto("http://localhost:9876");
+ await wait_for_page(page);
await page.getByLabel("Plot Type").click();
await page.getByRole("button", { name: "Matplotlib" }).click();
diff --git a/ui/packages/app/test/slider_release.spec.ts b/ui/packages/app/test/slider_release.spec.ts
--- a/ui/packages/app/test/slider_release.spec.ts
+++ b/ui/packages/app/test/slider_release.spec.ts
@@ -1,4 +1,5 @@
import { test, expect, Page, Locator } from "@playwright/test";
+import { mock_theme, wait_for_page } from "./utils";
//taken from: https://github.com/microsoft/playwright/issues/20032
async function changeSlider(
@@ -59,8 +60,8 @@ function mock_api(page: Page) {
test("slider release", async ({ page }) => {
await mock_demo(page, "slider_release");
await mock_api(page);
- await page.goto("http://localhost:9876");
-
+ await mock_theme(page);
+ await wait_for_page(page);
const slider = page.getByLabel("Slider");
await changeSlider(page, slider, slider, 0.7);
diff --git a/ui/packages/app/test/utils.ts b/ui/packages/app/test/utils.ts
new file mode 100644
--- /dev/null
+++ b/ui/packages/app/test/utils.ts
@@ -0,0 +1,17 @@
+import type { Page } from "@playwright/test";
+
+export function mock_theme(page: Page) {
+ return page.route("**/theme.css", (route) => {
+ return route.fulfill({
+ headers: {
+ "Access-Control-Allow-Origin": "*"
+ },
+ path: `./test/mocks/theme.css`
+ });
+ });
+}
+
+export async function wait_for_page(page: Page) {
+ await page.goto("http://localhost:9876");
+ await page.waitForResponse("**/theme.css");
+}
| `Code` component label overlays on text after refreshing the page
### Describe the bug
Very strange bug:
When the Gradio app page containing the `Code` component is refreshed (but NOT hard-refreshed), the label in the `Code` component overlays the code string. Here's how it looks:
<img width="549" alt="image" src="https://user-images.githubusercontent.com/1778297/225155003-2c1f3069-6f9c-49b8-9696-15ffa1df9342.png">
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Visit: https://huggingface.co/spaces/loubnabnl/santacoder-demo
Do a regular refresh (not a hard refresh) to see the issue
### Screenshot
_No response_
### Logs
```shell
N/A
```
### System Info
```shell
Gradio 3.21
```
### Severity
annoying
| 2023-03-22T12:48:46 |
|
gradio-app/gradio | 3,586 | gradio-app__gradio-3586 | [
"3658"
] | ebfce0f784e50d9ae9673eb97c0db185f44bc13a | diff --git a/demo/fake_gan/run.py b/demo/fake_gan/run.py
--- a/demo/fake_gan/run.py
+++ b/demo/fake_gan/run.py
@@ -36,7 +36,7 @@ def fake_gan():
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
- ).style(grid=[2], height="auto")
+ ).style(columns=[2], rows=[2], object_fit="contain", height="auto")
btn.click(fake_gan, None, gallery)
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -4060,25 +4060,39 @@ def style(
self,
*,
grid: int | Tuple | None = None,
+ columns: int | Tuple | None = None,
+ rows: int | Tuple | None = None,
height: str | None = None,
container: bool | None = None,
preview: bool | None = None,
+ object_fit: str | None = None,
**kwargs,
):
"""
This method can be used to change the appearance of the gallery component.
Parameters:
- grid: Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints
+ grid: ('grid' has been renamed to 'columns') Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints
+ columns: Represents the number of columns in the image grid, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints
+ rows: Represents the number of rows in the image grid, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints
height: Height of the gallery.
container: If True, will place gallery in a container - providing some extra padding around the border.
preview: If True, will display the Gallery in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size.
+ object_fit: CSS object-fit property for the thumbnail images in the gallery. Can be "contain", "cover", "fill", "none", or "scale-down".
"""
if grid is not None:
- self._style["grid"] = grid
+ warnings.warn(
+ "The 'grid' parameter will be deprecated. Please use 'columns' instead.",
+ )
+ self._style["grid_cols"] = grid
+ if columns is not None:
+ self._style["grid_cols"] = columns
+ if rows is not None:
+ self._style["grid_rows"] = rows
if height is not None:
self._style["height"] = height
if preview is not None:
self._style["preview"] = preview
+ if object_fit is not None:
+ self._style["object_fit"] = object_fit
Component.style(self, container=container, **kwargs)
return self
| Button.click gets an empty file input
### Describe the bug
If I use a `gr.File` object as an input of `Button.click()`, the corresponding event handler function will get an empty file.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
with gr.Blocks() as demo:
file = gr.File()
upload = gr.Button('Upload')
data = gr.Textbox(label='Data')
upload.click(lambda f: f.read(), inputs=[file], outputs=[data])
demo.launch()
```
### Screenshot
<img width="765" alt="image" src="https://user-images.githubusercontent.com/5109942/228165303-d90475df-c333-4d89-9950-9f50679e2234.png">
### Logs
```shell
No helpful logs.
```
### System Info
```shell
gradio==v3.23.1b2
Mac
```
### Severity
blocking upgrade to latest gradio version
| 2023-03-22T18:09:45 |
||
gradio-app/gradio | 3,660 | gradio-app__gradio-3660 | [
"3658"
] | 0df03a051f38dec78daa2d66b2cedc5444e3bd36 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2454,7 +2454,7 @@ class File(
):
"""
Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output).
- Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)
+ Preprocessing: passes the uploaded file as a {tempfile._TemporaryFileWrapper} or {List[tempfile._TemporaryFileWrapper]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)
Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.
Examples-format: a {str} path to a local file that populates the component.
Demos: zip_to_json, zip_files
| Button.click gets an empty file input
### Describe the bug
If I use a `gr.File` object as an input of `Button.click()`, the corresponding event handler function will get an empty file.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
with gr.Blocks() as demo:
file = gr.File()
upload = gr.Button('Upload')
data = gr.Textbox(label='Data')
upload.click(lambda f: f.read(), inputs=[file], outputs=[data])
demo.launch()
```
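A possible workaround sketch for the affected versions, assuming (as the component code in the patch above suggests) that the wrapper's `.name` points at the real copied file on disk:
```python
import gradio as gr

def read_uploaded(f):
    # The value passed in is a tempfile wrapper; its .name attribute is re-pointed
    # at the real copied file, so read by path instead of calling f.read().
    with open(f.name, "rb") as fh:
        return fh.read().decode(errors="replace")

with gr.Blocks() as demo:
    file = gr.File()
    upload = gr.Button("Upload")
    data = gr.Textbox(label="Data")
    upload.click(read_uploaded, inputs=[file], outputs=[data])

demo.launch()
```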
### Screenshot
<img width="765" alt="image" src="https://user-images.githubusercontent.com/5109942/228165303-d90475df-c333-4d89-9950-9f50679e2234.png">
### Logs
```shell
No helpful logs.
```
### System Info
```shell
gradio==v3.23.1b2
Mac
```
### Severity
blocking upgrade to latest gradio version
| 2023-03-28T08:18:21 |
||
gradio-app/gradio | 3,869 | gradio-app__gradio-3869 | [
"3864"
] | 69e915832a562b946568f2b00b82bc5a87b1357b | diff --git a/gradio/__init__.py b/gradio/__init__.py
--- a/gradio/__init__.py
+++ b/gradio/__init__.py
@@ -10,6 +10,8 @@
from gradio.components import (
HTML,
JSON,
+ AnnotatedImage,
+ Annotatedimage,
Audio,
BarPlot,
Button,
@@ -30,8 +32,6 @@
HighlightedText,
Highlightedtext,
Image,
- AnnotatedImage,
- Annotatedimage,
Interpretation,
Json,
Label,
diff --git a/gradio/themes/__init__.py b/gradio/themes/__init__.py
--- a/gradio/themes/__init__.py
+++ b/gradio/themes/__init__.py
@@ -13,4 +13,4 @@
def builder(*args, **kwargs):
from gradio.themes.builder import demo
- demo.launch(*args, **kwargs)
+ return demo.launch(*args, **kwargs)
diff --git a/gradio/themes/builder.py b/gradio/themes/builder.py
--- a/gradio/themes/builder.py
+++ b/gradio/themes/builder.py
@@ -2,6 +2,8 @@
import time
from typing import Iterable
+from gradio_client.documentation import document_fn
+
import gradio as gr
themes = [
@@ -16,10 +18,8 @@
palette_range = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 950]
size_range = ["xxs", "xs", "sm", "md", "lg", "xl", "xxl"]
-docs_theme_core = gr.documentation.document_fn(gr.themes.Base.__init__, gr.themes.Base)[
- 1
-]
-docs_theme_vars = gr.documentation.document_fn(gr.themes.Base.set, gr.themes.Base)[1]
+docs_theme_core = document_fn(gr.themes.Base.__init__, gr.themes.Base)[1]
+docs_theme_vars = document_fn(gr.themes.Base.set, gr.themes.Base)[1]
def get_docstr(var):
| diff --git a/test/test_theme_sharing.py b/test/test_theme_sharing.py
--- a/test/test_theme_sharing.py
+++ b/test/test_theme_sharing.py
@@ -414,3 +414,8 @@ def test_can_make_private(self, mock_1, mock_2, mock_3):
exist_ok=True,
private=True,
)
+
+
+def test_theme_builder_launches():
+ gr.themes.builder(prevent_thread_lock=True)
+ gr.close_all()
| theme builder example doesn't work for `3.25.0` or later
### Describe the bug
As mentioned in the [Gradio Theming Guide](https://gradio.app/theming-guide/#using-the-theme-builder) and thanks to #3664,
the awesome Gradio theme builder should be launchable with
```python
import gradio as gr
gr.themes.builder()
```
But this raises an error in `gradio==3.25.0` or later version with the following message:
```plain
AttributeError: module 'gradio' has no attribute 'documentation'
```
I think the problem is that `gr.documentation` was moved to `gradio_client.documentation`, but these references in the theme builder were not updated.
You may change the [following lines](https://github.com/gradio-app/gradio/blob/v3.24.1/gradio/themes/builder.py#L19-L22) from:
```python
docs_theme_core = gr.documentation.document_fn(gr.themes.Base.__init__, gr.themes.Base)[
1
]
docs_theme_vars = gr.documentation.document_fn(gr.themes.Base.set, gr.themes.Base)[1]
```
to
```python
from gradio_client.documentation import document_fn
docs_theme_core = document_fn(gr.themes.Base.__init__, gr.themes.Base)[
1
]
docs_theme_vars = document_fn(gr.themes.Base.set, gr.themes.Base)[1]
```
FYI, This also raises an error in [`theme_builder` demo in HF Spaces](https://huggingface.co/spaces/gradio/theme_builder)
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```python
import gradio as gr
gr.themes.builder()
```
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "demo.py", line 15, in <module>
gr.themes.builder()
File "/home/hksong/.virtualenvs/gradio_theme/lib/python3.8/site-packages/gradio/themes/__init__.py", line 14, in builder
from gradio.themes.builder import demo
File "/home/hksong/.virtualenvs/gradio_theme/lib/python3.8/site-packages/gradio/themes/builder.py", line 19, in <module>
docs_theme_core = gr.documentation.document_fn(gr.themes.Base.__init__, gr.themes.Base)[
AttributeError: module 'gradio' has no attribute 'documentation'
```
### System Info
```shell
3.25.0 (or later)
```
### Severity
annoying
| Ah you’re right, thanks @deepkyu! Would you like to make a PR to fix this? | 2023-04-15T05:58:40 |
gradio-app/gradio | 4,068 | gradio-app__gradio-4068 | [
"3514"
] | f763912bc8a05313c9de713a29797dbf8bdd9235 | diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -41,12 +41,12 @@
import matplotlib
import requests
from markdown_it import MarkdownIt
-from pygments import highlight
-from pygments.formatters import HtmlFormatter
-from pygments.lexers import get_lexer_by_name
from mdit_py_plugins.dollarmath.index import dollarmath_plugin
from mdit_py_plugins.footnote.index import footnote_plugin
from pydantic import BaseModel, parse_obj_as
+from pygments import highlight
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import get_lexer_by_name
import gradio
from gradio.context import Context
@@ -845,22 +845,25 @@ def get_type_hints(fn):
return {}
+def is_special_typed_parameter(name, parameter_types):
+ from gradio.helpers import EventData
+ from gradio.routes import Request
+
+ """Checks if parameter has a type hint designating it as a gr.Request or gr.EventData"""
+ hint = parameter_types.get(name)
+ if not hint:
+ return False
+ is_request = hint == Request
+ is_event_data = inspect.isclass(hint) and issubclass(hint, EventData)
+ return is_request or is_event_data
+
+
def check_function_inputs_match(fn: Callable, inputs: List, inputs_as_dict: bool):
"""
Checks if the input component set matches the function
Returns: None if valid, a string error message if mismatch
"""
- def is_special_typed_parameter(name):
- from gradio.helpers import EventData
- from gradio.routes import Request
-
- """Checks if parameter has a type hint designating it as a gr.Request or gr.EventData"""
- is_request = parameter_types.get(name, "") == Request
- # use int in the fall-back as that will always be false
- is_event_data = issubclass(parameter_types.get(name, int), EventData)
- return is_request or is_event_data
-
signature = inspect.signature(fn)
parameter_types = get_type_hints(fn)
min_args = 0
@@ -869,7 +872,7 @@ def is_special_typed_parameter(name):
for name, param in signature.parameters.items():
has_default = param.default != param.empty
if param.kind in [param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD]:
- if not is_special_typed_parameter(name):
+ if not is_special_typed_parameter(name, parameter_types):
if not has_default:
min_args += 1
max_args += 1
| diff --git a/test/test_utils.py b/test/test_utils.py
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -5,6 +5,7 @@
import sys
import unittest.mock as mock
import warnings
+from typing import List
from unittest.mock import MagicMock
import pytest
@@ -14,7 +15,7 @@
from pydantic import BaseModel
from typing_extensions import Literal
-from gradio import EventData
+from gradio import EventData, Request
from gradio.context import Context
from gradio.test_data.blocks_configs import (
XRAY_CONFIG,
@@ -34,6 +35,7 @@
get_local_ip_address,
get_type_hints,
ipython_check,
+ is_special_typed_parameter,
kaggle_check,
launch_analytics,
readme_to_html,
@@ -633,6 +635,16 @@ class GenericObject:
assert len(get_type_hints(GenericObject())) == 0
+ def test_is_special_typed_parameter(self):
+ def func(a: List[str], b: Literal["a", "b"], c, d: Request):
+ pass
+
+ hints = get_type_hints(func)
+ assert not is_special_typed_parameter("a", hints)
+ assert not is_special_typed_parameter("b", hints)
+ assert not is_special_typed_parameter("c", hints)
+ assert is_special_typed_parameter("d", hints)
+
class TestCheckFunctionInputsMatch:
def test_check_function_inputs_match(self):
| "issubclass() arg 1 must be a class" error appears when using input parameters with Optional type hint
### Describe the bug
I'm getting an error `issubclass() arg 1 must be a class` when using Optional typing in the target function. The problem started appearing with version `3.21.0`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Steps to reproduce:
1. Install gradio `3.21.0`
2. Target function with the following type hints:
```
def transcribe(
language: Optional[str] = 'auto',
):
```
3. Run gradio application
AR:
```
File ".../app.py", line 67, in <module>
allow_flagging='never',
File ".../.venv/lib/python3.7/site-packages/gradio/interface.py", line 463, in __init__
self.attach_submit_events(submit_btn, stop_btn)
File ".../.venv/lib/python3.7/site-packages/gradio/interface.py", line 624, in attach_submit_events
postprocess=not (self.api_mode),
File ".../.venv/lib/python3.7/site-packages/gradio/events.py", line 147, in __call__
trigger_only_on_success=self.trigger_only_on_success,
File ".../.venv/lib/python3.7/site-packages/gradio/blocks.py", line 190, in set_event_trigger
check_function_inputs_match(fn, inputs, inputs_as_dict)
File ".../.venv/lib/python3.7/site-packages/gradio/utils.py", line 943, in check_function_inputs_match
if not is_special_typed_parameter(name):
File ".../.venv/lib/python3.7/site-packages/gradio/utils.py", line 932, in is_special_typed_parameter
is_event_data = issubclass(parameter_types.get(name, int), EventData)
TypeError: issubclass() arg 1 must be a class
```
The problem is that the first parameter for `issubclass` is `typing.Union[str, NoneType]`
### Screenshot
_No response_
### Logs
_Stacktrace is attached in the reproduction steps._
### System Info
```shell
Gradio 3.21.0, Python 3.7
```
### Severity
serious, but I can work around it by removing annotations
| @hnsywangxin , I didn't try to downgrade the version. The simpler way was to remove type annotations.
> @hnsywangxin , I didn't try to downgrade the version. The simpler way was to remove type annotations.
Same, removing the annotations solved.
I'm having the same problem. It is a more generic problem than you described.
For me it happens with the following code:
```
def from_raw(raw_pdf: Union[str, bytes]) -> str:
...
pdf_file.upload(from_raw, inputs=[file], outputs=[text])
```
It seems like gradio events cannot handle type annotations on the `fn` arguments.
This should be fixed.
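A small self-contained sketch of the kind of guard that avoids the crash, mirroring the `inspect.isclass(...) and issubclass(...)` check in the fix above (the helper name and stand-in class are illustrative only):
```python
import inspect
from typing import Optional, Union


class EventData:  # stand-in for gradio's EventData, purely for illustration
    pass


def is_event_data_hint(hint) -> bool:
    # issubclass() only accepts real classes, so typing constructs such as
    # Optional[str] (i.e. Union[str, None]) must be filtered out first.
    return inspect.isclass(hint) and issubclass(hint, EventData)


assert not is_event_data_hint(Optional[str])      # no longer raises TypeError
assert not is_event_data_hint(Union[str, bytes])
assert is_event_data_hint(EventData)
```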
This issue has not been resolved. On Gradio 3.28 it still occurs, and removing the type annotations still seems to be the only resolution.
annoying
thanks for your advice, removing type annotations fixed it for me too
Will take a look and get a fix out this week! | 2023-05-04T18:20:54 |
gradio-app/gradio | 4,256 | gradio-app__gradio-4256 | [
"3592"
] | 1151c5253554cb87ebd4a44a8a470ac215ff782b | diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py
--- a/client/python/gradio_client/client.py
+++ b/client/python/gradio_client/client.py
@@ -785,8 +785,8 @@ def serialize(self, *data) -> tuple:
if t in ["file", "uploadbutton"]
]
uploaded_files = self._upload(files)
- self._add_uploaded_files_to_data(uploaded_files, list(data))
-
+ data = list(data)
+ self._add_uploaded_files_to_data(uploaded_files, data)
o = tuple([s.serialize(d) for s, d in zip(self.serializers, data)])
return o
diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -20,7 +20,7 @@
from enum import Enum
from pathlib import Path
from types import ModuleType
-from typing import TYPE_CHECKING, Any, Callable, Dict, cast
+from typing import TYPE_CHECKING, Any, Callable, Dict
import aiofiles
import altair as alt
@@ -217,14 +217,16 @@ def __init__(
if callable(load_fn):
self.attach_load_event(load_fn, every)
- def hash_file(self, file_path: str, chunk_num_blocks: int = 128) -> str:
+ @staticmethod
+ def hash_file(file_path: str, chunk_num_blocks: int = 128) -> str:
sha1 = hashlib.sha1()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * sha1.block_size), b""):
sha1.update(chunk)
return sha1.hexdigest()
- def hash_url(self, url: str, chunk_num_blocks: int = 128) -> str:
+ @staticmethod
+ def hash_url(url: str, chunk_num_blocks: int = 128) -> str:
sha1 = hashlib.sha1()
remote = urllib.request.urlopen(url)
max_file_size = 100 * 1024 * 1024 # 100MB
@@ -237,7 +239,14 @@ def hash_url(self, url: str, chunk_num_blocks: int = 128) -> str:
sha1.update(data)
return sha1.hexdigest()
- def hash_base64(self, base64_encoding: str, chunk_num_blocks: int = 128) -> str:
+ @staticmethod
+ def hash_bytes(bytes: bytes):
+ sha1 = hashlib.sha1()
+ sha1.update(bytes)
+ return sha1.hexdigest()
+
+ @staticmethod
+ def hash_base64(base64_encoding: str, chunk_num_blocks: int = 128) -> str:
sha1 = hashlib.sha1()
for i in range(0, len(base64_encoding), chunk_num_blocks * sha1.block_size):
data = base64_encoding[i : i + chunk_num_blocks * sha1.block_size]
@@ -251,9 +260,8 @@ def make_temp_copy_if_needed(self, file_path: str) -> str:
temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir
temp_dir.mkdir(exist_ok=True, parents=True)
- f = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
- f.name = client_utils.strip_invalid_filename_characters(Path(file_path).name)
- full_temp_file_path = str(utils.abspath(temp_dir / f.name))
+ name = client_utils.strip_invalid_filename_characters(Path(file_path).name)
+ full_temp_file_path = str(utils.abspath(temp_dir / name))
if not Path(full_temp_file_path).exists():
shutil.copy2(file_path, full_temp_file_path)
@@ -267,15 +275,14 @@ async def save_uploaded_file(self, file: UploadFile, upload_dir: str) -> str:
) # Since the full file is being uploaded anyways, there is no benefit to hashing the file.
temp_dir = Path(upload_dir) / temp_dir
temp_dir.mkdir(exist_ok=True, parents=True)
- output_file_obj = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
if file.filename:
file_name = Path(file.filename).name
- output_file_obj.name = client_utils.strip_invalid_filename_characters(
- file_name
- )
+ name = client_utils.strip_invalid_filename_characters(file_name)
+ else:
+ name = f"tmp{secrets.token_hex(5)}"
- full_temp_file_path = str(utils.abspath(temp_dir / output_file_obj.name))
+ full_temp_file_path = str(utils.abspath(temp_dir / name))
async with aiofiles.open(full_temp_file_path, "wb") as output_file:
while True:
@@ -292,10 +299,9 @@ def download_temp_copy_if_needed(self, url: str) -> str:
temp_dir = self.hash_url(url)
temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir
temp_dir.mkdir(exist_ok=True, parents=True)
- f = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
- f.name = client_utils.strip_invalid_filename_characters(Path(url).name)
- full_temp_file_path = str(utils.abspath(temp_dir / f.name))
+ name = client_utils.strip_invalid_filename_characters(Path(url).name)
+ full_temp_file_path = str(utils.abspath(temp_dir / name))
if not Path(full_temp_file_path).exists():
with requests.get(url, stream=True) as r, open(
@@ -323,8 +329,7 @@ def base64_to_temp_file_if_needed(
file_name = f"file.{guess_extension}"
else:
file_name = "file"
- f = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
- f.name = file_name # type: ignore
+
full_temp_file_path = str(utils.abspath(temp_dir / file_name)) # type: ignore
if not Path(full_temp_file_path).exists():
@@ -335,6 +340,36 @@ def base64_to_temp_file_if_needed(
self.temp_files.add(full_temp_file_path)
return full_temp_file_path
+ def pil_to_temp_file(self, img: _Image.Image, dir: str, format="png") -> str:
+ bytes_data = processing_utils.encode_pil_to_bytes(img, format)
+ temp_dir = Path(dir) / self.hash_bytes(bytes_data)
+ temp_dir.mkdir(exist_ok=True, parents=True)
+ filename = str(temp_dir / f"image.{format}")
+ img.save(filename, pnginfo=processing_utils.get_pil_metadata(img))
+ return filename
+
+ def img_array_to_temp_file(self, arr: np.ndarray, dir: str) -> str:
+ pil_image = _Image.fromarray(
+ processing_utils._convert(arr, np.uint8, force_copy=False)
+ )
+ return self.pil_to_temp_file(pil_image, dir, format="png")
+
+ def audio_to_temp_file(
+ self, data: np.ndarray, sample_rate: int, dir: str, format: str
+ ):
+ temp_dir = Path(dir) / self.hash_bytes(data.tobytes())
+ temp_dir.mkdir(exist_ok=True, parents=True)
+ filename = str(temp_dir / f"audio.{format}")
+ processing_utils.audio_to_file(sample_rate, data, filename, format=format)
+ return filename
+
+ def file_bytes_to_file(self, data: bytes, dir: str, file_name: str):
+ path = Path(dir) / self.hash_bytes(data)
+ path.mkdir(exist_ok=True, parents=True)
+ path = path / Path(file_name).name
+ path.write_bytes(data)
+ return path
+
def get_config(self):
config = {
"label": self.label,
@@ -1758,12 +1793,11 @@ def _format_image(
elif self.type == "numpy":
return np.array(im)
elif self.type == "filepath":
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- suffix=(f".{fmt.lower()}" if fmt is not None else ".png"),
+ path = self.pil_to_temp_file(
+ im, dir=self.DEFAULT_TEMP_DIR, format=fmt or "png"
)
- im.save(file_obj.name)
- return self.make_temp_copy_if_needed(file_obj.name)
+ self.temp_files.add(path)
+ return path
else:
raise ValueError(
"Unknown type: "
@@ -2259,8 +2293,7 @@ def srt_to_vtt(srt_file_path, vtt_file_path):
# HTML5 only support vtt format
if Path(subtitle).suffix == ".srt":
temp_file = tempfile.NamedTemporaryFile(
- delete=False,
- suffix=".vtt",
+ delete=False, suffix=".vtt", dir=self.DEFAULT_TEMP_DIR
)
srt_to_vtt(subtitle, temp_file.name)
@@ -2483,7 +2516,9 @@ def tokenize(self, x):
# Handle the leave one outs
leave_one_out_data = np.copy(data)
leave_one_out_data[start:stop] = 0
- file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+ file = tempfile.NamedTemporaryFile(
+ delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR
+ )
processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name)
out_data = client_utils.encode_file_to_base64(file.name)
leave_one_out_sets.append(out_data)
@@ -2494,7 +2529,9 @@ def tokenize(self, x):
token = np.copy(data)
token[0:start] = 0
token[stop:] = 0
- file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+ file = tempfile.NamedTemporaryFile(
+ delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR
+ )
processing_utils.audio_to_file(sample_rate, token, file.name)
token_data = client_utils.encode_file_to_base64(file.name)
file.close()
@@ -2525,7 +2562,7 @@ def get_masked_inputs(self, tokens, binary_mask_matrix):
masked_input = np.copy(zero_input)
for t, b in zip(token_data, binary_mask_vector):
masked_input = masked_input + t * int(b)
- file = tempfile.NamedTemporaryFile(delete=False)
+ file = tempfile.NamedTemporaryFile(delete=False, dir=self.DEFAULT_TEMP_DIR)
processing_utils.audio_to_file(sample_rate, masked_input, file.name)
masked_data = client_utils.encode_file_to_base64(file.name)
file.close()
@@ -2546,11 +2583,9 @@ def postprocess(self, y: tuple[int, np.ndarray] | str | None) -> str | dict | No
return {"name": y, "data": None, "is_file": True}
if isinstance(y, tuple):
sample_rate, data = y
- file = tempfile.NamedTemporaryFile(suffix=f".{self.format}", delete=False)
- processing_utils.audio_to_file(
- sample_rate, data, file.name, format=self.format
+ file_path = self.audio_to_temp_file(
+ data, sample_rate, dir=self.DEFAULT_TEMP_DIR, format=self.format
)
- file_path = str(utils.abspath(file.name))
self.temp_files.add(file_path)
else:
file_path = self.make_temp_copy_if_needed(y)
@@ -2720,14 +2755,21 @@ def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
)
if self.type == "file":
if is_file:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- file = tempfile.NamedTemporaryFile(delete=False)
- file.name = temp_file_path
- file.orig_name = file_name # type: ignore
+ path = self.make_temp_copy_if_needed(file_name)
else:
- file = client_utils.decode_base64_to_file(data, file_path=file_name)
- file.orig_name = file_name # type: ignore
- self.temp_files.add(str(utils.abspath(file.name)))
+ data, _ = client_utils.decode_base64_to_binary(data)
+ path = self.file_bytes_to_file(
+ data, dir=self.DEFAULT_TEMP_DIR, file_name=file_name
+ )
+ path = str(utils.abspath(path))
+ self.temp_files.add(path)
+
+ # Creation of tempfiles here
+ file = tempfile.NamedTemporaryFile(
+ delete=False, dir=self.DEFAULT_TEMP_DIR
+ )
+ file.name = path
+ file.orig_name = file_name # type: ignore
return file
elif (
self.type == "binary" or self.type == "bytes"
@@ -2777,13 +2819,14 @@ def postprocess(
for file in y
]
else:
- return {
+ d = {
"orig_name": Path(y).name,
"name": self.make_temp_copy_if_needed(y),
"size": Path(y).stat().st_size,
"data": None,
"is_file": True,
}
+ return d
def style(
self,
@@ -3472,14 +3515,19 @@ def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
)
if self.type == "file":
if is_file:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- file = tempfile.NamedTemporaryFile(delete=False)
- file.name = temp_file_path
- file.orig_name = file_name # type: ignore
+ path = self.make_temp_copy_if_needed(file_name)
else:
- file = client_utils.decode_base64_to_file(data, file_path=file_name)
- file.orig_name = file_name # type: ignore
- self.temp_files.add(str(utils.abspath(file.name)))
+ data, _ = client_utils.decode_base64_to_binary(data)
+ path = self.file_bytes_to_file(
+ data, dir=self.DEFAULT_TEMP_DIR, file_name=file_name
+ )
+ path = str(utils.abspath(path))
+ self.temp_files.add(path)
+ file = tempfile.NamedTemporaryFile(
+ delete=False, dir=self.DEFAULT_TEMP_DIR
+ )
+ file.name = path
+ file.orig_name = file_name # type: ignore
return file
elif self.type == "bytes":
if is_file:
@@ -4068,11 +4116,11 @@ def postprocess(
base_img_path = base_img
base_img = np.array(_Image.open(base_img))
elif isinstance(base_img, np.ndarray):
- base_file = processing_utils.save_array_to_file(base_img)
- base_img_path = str(utils.abspath(base_file.name))
+ base_file = self.img_array_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR)
+ base_img_path = str(utils.abspath(base_file))
elif isinstance(base_img, _Image.Image):
- base_file = processing_utils.save_pil_to_file(base_img)
- base_img_path = str(utils.abspath(base_file.name))
+ base_file = self.pil_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR)
+ base_img_path = str(utils.abspath(base_file))
base_img = np.array(base_img)
else:
raise ValueError(
@@ -4116,8 +4164,10 @@ def hex_to_rgb(value):
colored_mask_img = _Image.fromarray((colored_mask).astype(np.uint8))
- mask_file = processing_utils.save_pil_to_file(colored_mask_img)
- mask_file_path = str(utils.abspath(mask_file.name))
+ mask_file = self.pil_to_temp_file(
+ colored_mask_img, dir=self.DEFAULT_TEMP_DIR
+ )
+ mask_file_path = str(utils.abspath(mask_file))
self.temp_files.add(mask_file_path)
sections.append(
@@ -4404,12 +4454,12 @@ def postprocess(
if isinstance(img, (tuple, list)):
img, caption = img
if isinstance(img, np.ndarray):
- file = processing_utils.save_array_to_file(img)
- file_path = str(utils.abspath(file.name))
+ file = self.img_array_to_temp_file(img, dir=self.DEFAULT_TEMP_DIR)
+ file_path = str(utils.abspath(file))
self.temp_files.add(file_path)
elif isinstance(img, _Image.Image):
- file = processing_utils.save_pil_to_file(img)
- file_path = str(utils.abspath(file.name))
+ file = self.pil_to_temp_file(img, dir=self.DEFAULT_TEMP_DIR)
+ file_path = str(utils.abspath(file))
self.temp_files.add(file_path)
elif isinstance(img, str):
if utils.validate_url(img):
diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -2,6 +2,7 @@
import base64
import json
+import os
import shutil
import subprocess
import tempfile
@@ -64,13 +65,6 @@ def encode_plot_to_base64(plt):
return "data:image/png;base64," + base64_str
-def save_array_to_file(image_array, dir=None):
- pil_image = Image.fromarray(_convert(image_array, np.uint8, force_copy=False))
- file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir)
- pil_image.save(file_obj)
- return file_obj
-
-
def get_pil_metadata(pil_image):
# Copy any text-only metadata
metadata = PngImagePlugin.PngInfo()
@@ -81,16 +75,14 @@ def get_pil_metadata(pil_image):
return metadata
-def save_pil_to_file(pil_image, dir=None):
- file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir)
- pil_image.save(file_obj, pnginfo=get_pil_metadata(pil_image))
- return file_obj
+def encode_pil_to_bytes(pil_image, format="png"):
+ with BytesIO() as output_bytes:
+ pil_image.save(output_bytes, format, pnginfo=get_pil_metadata(pil_image))
+ return output_bytes.getvalue()
def encode_pil_to_base64(pil_image):
- with BytesIO() as output_bytes:
- pil_image.save(output_bytes, "PNG", pnginfo=get_pil_metadata(pil_image))
- bytes_data = output_bytes.getvalue()
+ bytes_data = encode_pil_to_bytes(pil_image)
base64_str = str(base64.b64encode(bytes_data), "utf-8")
return "data:image/png;base64," + base64_str
@@ -519,8 +511,8 @@ def video_is_playable(video_filepath: str) -> bool:
def convert_video_to_playable_mp4(video_path: str) -> str:
"""Convert the video to mp4. If something goes wrong return the original video."""
try:
- output_path = Path(video_path).with_suffix(".mp4")
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+ output_path = Path(video_path).with_suffix(".mp4")
shutil.copy2(video_path, tmp_file.name)
# ffmpeg will automatically use h264 codec (playable in browser) when converting to mp4
ff = FFmpeg(
@@ -532,4 +524,7 @@ def convert_video_to_playable_mp4(video_path: str) -> str:
except FFRuntimeError as e:
print(f"Error converting video to browser-playable format {str(e)}")
output_path = video_path
+ finally:
+ # Remove temp file
+ os.remove(tmp_file.name) # type: ignore
return str(output_path)
| diff --git a/client/python/test/test_client.py b/client/python/test/test_client.py
--- a/client/python/test/test_client.py
+++ b/client/python/test/test_client.py
@@ -252,13 +252,19 @@ def test_upload_file_private_space(self):
with patch.object(
client.endpoints[0], "_upload", wraps=client.endpoints[0]._upload
) as upload:
- with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
- f.write("Hello from private space!")
-
- output = client.submit(1, "foo", f.name, api_name="/file_upload").result()
+ with patch.object(
+ client.endpoints[0], "serialize", wraps=client.endpoints[0].serialize
+ ) as serialize:
+ with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+ f.write("Hello from private space!")
+
+ output = client.submit(
+ 1, "foo", f.name, api_name="/file_upload"
+ ).result()
with open(output) as f:
assert f.read() == "Hello from private space!"
upload.assert_called_once()
+ assert all(f["is_file"] for f in serialize.return_value())
with patch.object(
client.endpoints[1], "_upload", wraps=client.endpoints[0]._upload
diff --git a/test/conftest.py b/test/conftest.py
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,7 +1,9 @@
import inspect
import pathlib
+from contextlib import contextmanager
import pytest
+from gradio_client import Client
import gradio as gr
@@ -32,3 +34,24 @@ def io_components():
subclasses.append(subclass)
return subclasses
+
+
[email protected]
+def connect():
+ @contextmanager
+ def _connect(demo: gr.Blocks, serialize=True):
+ _, local_url, _ = demo.launch(prevent_thread_lock=True)
+ try:
+ yield Client(local_url, serialize=serialize)
+ finally:
+ # A more verbose version of .close()
+ # because we should set a timeout
+ # the tests that call .cancel() can get stuck
+ # waiting for the thread to join
+ if demo.enable_queue:
+ demo._queue.close()
+ demo.is_running = False
+ demo.server.should_exit = True
+ demo.server.thread.join(timeout=1)
+
+ return _connect
diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -15,11 +15,14 @@
from string import capwords
from unittest.mock import patch
+import gradio_client as grc
+import numpy as np
import pytest
import uvicorn
import websockets
from fastapi.testclient import TestClient
from gradio_client import media_data
+from PIL import Image
import gradio as gr
from gradio.events import SelectData
@@ -463,6 +466,106 @@ def test_raise_error_if_event_queued_but_queue_not_enabled(self):
demo.close()
+class TestTempFile:
+ def test_pil_images_hashed(self, tmp_path, connect, monkeypatch):
+ images = [
+ Image.new("RGB", (512, 512), color) for color in ("red", "green", "blue")
+ ]
+
+ def create_images(n_images):
+ return random.sample(images, n_images)
+
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(
+ create_images,
+ inputs=[gr.Slider(value=3, minimum=1, maximum=3, step=1)],
+ outputs=[gr.Gallery().style(grid=2, preview=True)],
+ )
+ with connect(demo) as client:
+ _ = client.predict(3)
+ _ = client.predict(3)
+ # only three files created
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 3
+
+ def test_no_empty_image_files(self, tmp_path, connect, monkeypatch):
+ file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ image = str(file_dir / "bus.png")
+
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(
+ lambda x: x,
+ inputs=gr.Image(type="filepath"),
+ outputs=gr.Image(),
+ )
+ with connect(demo) as client:
+ _ = client.predict(image)
+ _ = client.predict(image)
+ _ = client.predict(image)
+ # only three files created
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 1
+
+ @pytest.mark.parametrize("component", [gr.UploadButton, gr.File])
+ def test_file_component_uploads(self, component, tmp_path, connect, monkeypatch):
+ code_file = str(pathlib.Path(__file__))
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(lambda x: x.name, component(), gr.File())
+ with connect(demo) as client:
+ _ = client.predict(code_file)
+ _ = client.predict(code_file)
+ # the upload route does not hash the file so 2 files from there
+ # We create two tempfiles (empty) because API says we return
+ # preprocess/postprocess will only create one file since we hash
+ # so 2 + 2 + 1 = 5
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 5
+
+ @pytest.mark.parametrize("component", [gr.UploadButton, gr.File])
+ def test_file_component_uploads_no_serialize(
+ self, component, tmp_path, connect, monkeypatch
+ ):
+ code_file = str(pathlib.Path(__file__))
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(lambda x: x.name, component(), gr.File())
+ with connect(demo, serialize=False) as client:
+ _ = client.predict(gr.File().serialize(code_file))
+ _ = client.predict(gr.File().serialize(code_file))
+ # We skip the upload route in this case
+ # We create two tempfiles (empty) because API says we return
+ # preprocess/postprocess will only create one file since we hash
+ # so 2 + 1 = 3
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 3
+
+ def test_no_empty_video_files(self, tmp_path, monkeypatch, connect):
+ file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ video = str(file_dir / "video_sample.mp4")
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(lambda x: x, gr.Video(type="file"), gr.Video())
+ with connect(demo) as client:
+ _, url, _ = demo.launch(prevent_thread_lock=True)
+ client = grc.Client(url)
+ _ = client.predict(video)
+ _ = client.predict(video)
+ # During preprocessing we compute the hash based on base64
+ # In postprocessing we compute it based on the file
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 2
+
+ def test_no_empty_audio_files(self, tmp_path, monkeypatch, connect):
+ file_dir = pathlib.Path(pathlib.Path(__file__).parent, "test_files")
+ audio = str(file_dir / "audio_sample.wav")
+
+ def reverse_audio(audio):
+ sr, data = audio
+ return (sr, np.flipud(data))
+
+ monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path))
+ demo = gr.Interface(fn=reverse_audio, inputs=gr.Audio(), outputs=gr.Audio())
+ with connect(demo) as client:
+ _ = client.predict(audio)
+ _ = client.predict(audio)
+ # During preprocessing we compute the hash based on base64
+ # In postprocessing we compute it based on the file
+ assert len([f for f in tmp_path.glob("**/*") if f.is_file()]) == 2
+
+
class TestComponentsInBlocks:
def test_slider_random_value_config(self):
with gr.Blocks() as demo:
diff --git a/test/test_processing_utils.py b/test/test_processing_utils.py
--- a/test/test_processing_utils.py
+++ b/test/test_processing_utils.py
@@ -9,9 +9,9 @@
import numpy as np
import pytest
from gradio_client import media_data
-from PIL import Image
+from PIL import Image, ImageCms
-from gradio import processing_utils, utils
+from gradio import components, processing_utils, utils
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -54,16 +54,49 @@ def test_encode_pil_to_base64(self):
output_base64 = processing_utils.encode_pil_to_base64(img)
assert output_base64 == deepcopy(media_data.ARRAY_TO_BASE64_IMAGE)
- def test_save_pil_to_file_keeps_pnginfo(self):
+ def test_save_pil_to_file_keeps_pnginfo(self, tmp_path):
input_img = Image.open("gradio/test_data/test_image.png")
input_img = input_img.convert("RGB")
input_img.info = {"key1": "value1", "key2": "value2"}
- file_obj = processing_utils.save_pil_to_file(input_img)
+ file_obj = components.Image().pil_to_temp_file(input_img, dir=tmp_path)
output_img = Image.open(file_obj)
assert output_img.info == input_img.info
+ def test_np_pil_encode_to_the_same(self, tmp_path):
+ arr = np.random.randint(0, 255, size=(100, 100, 3), dtype=np.uint8)
+ pil = Image.fromarray(arr)
+ comp = components.Image()
+ assert comp.pil_to_temp_file(pil, dir=tmp_path) == comp.img_array_to_temp_file(
+ arr, dir=tmp_path
+ )
+
+ def test_encode_pil_to_temp_file_metadata_color_profile(self, tmp_path):
+ # Read image
+ img = Image.open("gradio/test_data/test_image.png")
+ img_metadata = Image.open("gradio/test_data/test_image.png")
+ img_metadata.info = {"key1": "value1", "key2": "value2"}
+
+ # Creating sRGB profile
+ profile = ImageCms.createProfile("sRGB")
+ profile2 = ImageCms.ImageCmsProfile(profile)
+ img.save(tmp_path / "img_color_profile.png", icc_profile=profile2.tobytes())
+ img_cp1 = Image.open(str(tmp_path / "img_color_profile.png"))
+
+ # Creating XYZ profile
+ profile = ImageCms.createProfile("XYZ")
+ profile2 = ImageCms.ImageCmsProfile(profile)
+ img.save(tmp_path / "img_color_profile_2.png", icc_profile=profile2.tobytes())
+ img_cp2 = Image.open(str(tmp_path / "img_color_profile_2.png"))
+
+ comp = components.Image()
+ img_path = comp.pil_to_temp_file(img, dir=tmp_path)
+ img_metadata_path = comp.pil_to_temp_file(img_metadata, dir=tmp_path)
+ img_cp1_path = comp.pil_to_temp_file(img_cp1, dir=tmp_path)
+ img_cp2_path = comp.pil_to_temp_file(img_cp2, dir=tmp_path)
+ assert len({img_path, img_metadata_path, img_cp1_path, img_cp2_path}) == 4
+
def test_encode_pil_to_base64_keeps_pnginfo(self):
input_img = Image.open("gradio/test_data/test_image.png")
input_img = input_img.convert("RGB")
@@ -205,9 +238,12 @@ def test_convert_video_to_playable_mp4(self, test_file_dir):
shutil.copy(
str(test_file_dir / "bad_video_sample.mp4"), tmp_not_playable_vid.name
)
- playable_vid = processing_utils.convert_video_to_playable_mp4(
- tmp_not_playable_vid.name
- )
+ with patch("os.remove", wraps=os.remove) as mock_remove:
+ playable_vid = processing_utils.convert_video_to_playable_mp4(
+ tmp_not_playable_vid.name
+ )
+ # check tempfile got deleted
+ assert not Path(mock_remove.call_args[0][0]).exists()
assert processing_utils.video_is_playable(playable_vid)
@patch("ffmpy.FFmpeg.run", side_effect=raise_ffmpy_runtime_exception)
| Gradio creates a lot of empty temporary folders on each run
### Describe the bug
Described in https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/8752#issuecomment-1481020332, every time a Gradio app is launched it creates multiple temporary folders, and a lot of them remain empty. This is somewhat important because a lot of webui users want to inspect intermediate outputs and it's hard to sift through dozens of temporary folders.
Even if an autogenerated temp folder is needed on each startup, it would be better to cut down on the number of temporary folders introduced
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
Just run a Gradio app with a large number of components and watch how many temporary folders are added
### Screenshot
_No response_
### Logs
```shell
N/A
```
### System Info
```shell
3.23.0, Windows, Chrome
```
### Severity
annoying
| Interesting. Empty folders should not be created -- I'll take a look at this
Hi @space-nuko !
Is this still an issue in the latest gradio release? All the files created by gradio should now be located in `tempfile.gettempdir() / "gradio"` by default and any subdirectories are created when postprocessing of a component is run, e.g. Gallery.postprocess, Video.postprocess. I can't see the empty directories created on launch.
If there is a repro we can run outside the sd web ui, that would be much appreciated!
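A small sketch for checking where gradio is writing its temporary files; `GRADIO_TEMP_DIR` is the same environment variable the test patch for this PR uses, while the override directory chosen below is just an example:
```python
import os
import tempfile
from pathlib import Path

# Default location used by recent gradio releases for temporary files.
default_dir = Path(tempfile.gettempdir()) / "gradio"
print("gradio temp dir:", default_dir)
if default_dir.exists():
    print("entries:", sorted(p.name for p in default_dir.iterdir()))

# Optional: redirect gradio's temp files to a directory you control.
# Set this before launching the app.
os.environ["GRADIO_TEMP_DIR"] = str(Path.home() / "gradio_tmp")
```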
| 2023-05-17T17:29:58 |
gradio-app/gradio | 4,350 | gradio-app__gradio-4350 | [
"4324"
] | 01d334b0b971008e3565adcffcb053ebb99e11f8 | diff --git a/demo/chatbot_simple/run.py b/demo/chatbot_simple/run.py
--- a/demo/chatbot_simple/run.py
+++ b/demo/chatbot_simple/run.py
@@ -2,6 +2,15 @@
import random
import time
+md = """This is some code:
+
+<h1>hello</h1>
+
+```py
+def fn(x, y, z):
+ print(x, y, z)
+"""
+
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.Textbox()
@@ -9,7 +18,7 @@
def respond(message, chat_history):
bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
- chat_history.append((message, bot_message))
+ chat_history.append((message, md))
time.sleep(1)
return "", chat_history
| Copy of code blocks does not work for LAN access
### Describe the bug
When I access my application over the LAN and click the copy button in a code block, an error is reported.
If I access it via localhost, the problem does not occur.
Please fix it as soon as possible

### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
.launch(server_name="0.0.0.0"
### Screenshot
_No response_
### Logs
```shell
properties of undefined (reading 'writeText')
at HTMLButtonElement.<anonymous> (ChatBot.svelte:57:25)
(匿名) @ ChatBot.svelte:57
ChatBot.svelte:5
```
### System Info
```shell
no
```
### Severity
annoying
| This error is due to the browser security policy: `navigator.clipboard.writeText()` is only available in a secure context (HTTPS or localhost), so it is blocked when the app is accessed over plain HTTP on a LAN. This restriction exists to prevent malicious pages from abusing the clipboard.
It works when accessed via localhost because localhost is treated as a secure context, so the clipboard API is available.
To solve this problem, you can use a library or technology such as `clipboard.js`, which provides cross-browser clipboard functionality and is not restricted by browser security policies. The following is sample code for using `clipboard.js`:
1. First, you need to include the `clipboard.js` library. You can download it from the official website, or use the CDN link:
```html
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.8/clipboard.min.js"></script>
```
2. Then, in your code, replace the following code:
```javascript
navigator.clipboard.writeText(code_node.innerText.trimEnd());
```
with:
```javascript
const clipboard = new ClipboardJS(button, {
  text: function() {
    return code_node.innerText.trimEnd();
  }
});
```
Make sure you have properly introduced the `clipboard.js` library and added it to the handler for the button click event.
By using `clipboard.js`, you can use the clipboard functionality properly in a LAN environment while avoiding the restrictions of browser security policies.
Translated with www.DeepL.com/Translator (free version)
Hi @Kilig947 it looks like you are using an older version of Gradio. Can you confirm if this is still an issue in the latest version of `gradio` (3.32)?
> Hi @Kilig947 it looks like you are using an older version of Gradio. Can you confirm if this is still an issue in the latest version of `gradio` (3.32)?
Is drawing HTML code no longer supported in 3.32.0 chatbot? If I throw the HTML code to chatbot, it will display it directly in source code 🥲 🥲 🥲 🥲 🥲


demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name=f"{func_box.ipaddr()}", share=True, server_port=PORT, auth=AUTHENTICATION)
The start-up parameters are as above, and when the copy button is clicked, no error is reported, but the copy is not successful either🥲

(Light theme): http://localhost:7891
(dark theme): http://10.13.78.56:7891/? __theme=dark
Both in 3.28.3 and 3.32.0, when I visit http://localhost:7891, the copy button works fine, but when I visit http://10.13.78.56:7891/? __theme=dark, the copy button does not work properly
> demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name=f"{func_box.ipaddr()}", share=True, server_port=PORT, auth=AUTHENTICATION) The start-up parameters are as above, and when the copy button is clicked, no error is reported, but the copy is not successful either🥲
>
> 
| 2023-05-28T03:27:07 |
|
gradio-app/gradio | 4,422 | gradio-app__gradio-4422 | [
"4026"
] | 9c5a1d871c306813a663582e7aa0ee7fbcd8e97a | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -64,6 +64,7 @@
EventListenerMethod,
Inputable,
Playable,
+ Recordable,
Releaseable,
Selectable,
Streamable,
@@ -2134,6 +2135,7 @@ class Video(
Changeable,
Clearable,
Playable,
+ Recordable,
Uploadable,
IOComponent,
VideoSerializable,
@@ -2482,6 +2484,7 @@ class Audio(
Changeable,
Clearable,
Playable,
+ Recordable,
Streamable,
Uploadable,
IOComponent,
diff --git a/gradio/events.py b/gradio/events.py
--- a/gradio/events.py
+++ b/gradio/events.py
@@ -131,6 +131,11 @@ def __call__(
warnings.warn(
"The 'status_tracker' parameter has been deprecated and has no effect."
)
+ if self.event_name == "stop":
+ warnings.warn(
+ "The `stop` event on Video and Audio has been deprecated and will be remove in a future version. Use `ended` instead."
+ )
+
if isinstance(self, Streamable):
self.check_streamable()
if isinstance(show_progress, bool):
@@ -234,13 +239,19 @@ def __init__(self):
self.pause = EventListenerMethod(self, "pause")
"""
- This listener is triggered when the user pauses the component (e.g. audio or video).
+ This listener is triggered when the media stops playing for any reason (e.g. audio or video).
This method can be used when this component is in a Gradio Blocks.
"""
self.stop = EventListenerMethod(self, "stop")
"""
- This listener is triggered when the user stops the component (e.g. audio or video).
+ This listener is triggered when the user reaches the end of the media track (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
+ """
+
+ self.end = EventListenerMethod(self, "end")
+ """
+ This listener is triggered when the user reaches the end of the media track (e.g. audio or video).
This method can be used when this component is in a Gradio Blocks.
"""
@@ -264,6 +275,22 @@ def check_streamable(self):
pass
+@document("*start_recording", "*stop_recording", inherit=True)
+class Recordable(EventListener):
+ def __init__(self):
+ self.start_recording = EventListenerMethod(self, "start_recording")
+ """
+ This listener is triggered when the user starts recording with the component (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
+ """
+
+ self.stop_recording = EventListenerMethod(self, "stop_recording")
+ """
+ This listener is triggered when the user stops recording with the component (e.g. audio or video).
+ This method can be used when this component is in a Gradio Blocks.
+ """
+
+
@document("*blur", inherit=True)
class Blurrable(EventListener):
def __init__(self):
| Stop event handler not triggered for audio
### Describe the bug
The `stop` event handler is not triggered for audio
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
```
import gradio as gr
def stop_streaming():
print("Streaming has stopped")
if __name__ == "__main__":
with gr.Blocks() as demo:
stream_input = gr.Audio(source="microphone")
stream_input.stop(stop_streaming)
demo.queue().launch(share=False, debug=False)
```
### Screenshot
"Streaming has stopped" is not printed to the console.

### Logs
```shell
-
```
### System Info
```shell
3.28.1
```
### Severity
serious, but I can work around it
| Should check all the `Playable` events (stop, pause, play) work as well!
I don't think we have ever emitted a `stop` event when the webcam recording is stopped. I think those events are only triggered by the player.
We may want to consider separate events for start and stop recording because they are different to simply stopping an existing media track.
I checked and we haven't, this is a feature request (although a sensible one).
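A usage sketch of what such separate listeners look like with the `start_recording`/`stop_recording` events that the patch above adds via `Recordable` (illustrative only):
```python
import gradio as gr

def on_start():
    print("Recording started")

def on_stop():
    print("Recording stopped")

with gr.Blocks() as demo:
    mic = gr.Audio(source="microphone")
    mic.start_recording(on_start)
    mic.stop_recording(on_stop)

demo.queue().launch()
```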
What's the difference between `stop` and `pause` events (both defined on Playable event class)?
I'm using a very inefficient method to catch stops, is there any chance this could be improved?
> I'm using a very inefficient method to catch stops, is there any chance this could be improved?
@caffeinism Could you share the code sample for how you have the workaround to catch stops? Thanks! | 2023-06-05T04:12:11 |
|
gradio-app/gradio | 4,440 | gradio-app__gradio-4440 | [
"4424"
] | e364f81ffcf79a22c4f80fe0efeedb9a8f2fde25 | diff --git a/client/python/gradio_client/serializing.py b/client/python/gradio_client/serializing.py
--- a/client/python/gradio_client/serializing.py
+++ b/client/python/gradio_client/serializing.py
@@ -286,11 +286,9 @@ def _deserialize_single(
root_url + "file=" + filepath,
hf_token=hf_token,
dir=save_dir,
- ).name
+ )
else:
- file_name = utils.create_tmp_copy_of_file(
- filepath, dir=save_dir
- ).name
+ file_name = utils.create_tmp_copy_of_file(filepath, dir=save_dir)
else:
data = x.get("data")
assert data is not None, f"The 'data' field is missing in {x}"
diff --git a/client/python/gradio_client/utils.py b/client/python/gradio_client/utils.py
--- a/client/python/gradio_client/utils.py
+++ b/client/python/gradio_client/utils.py
@@ -6,6 +6,7 @@
import mimetypes
import os
import pkgutil
+import secrets
import shutil
import tempfile
from concurrent.futures import CancelledError
@@ -273,40 +274,27 @@ async def get_pred_from_ws(
def download_tmp_copy_of_file(
url_path: str, hf_token: str | None = None, dir: str | None = None
-) -> tempfile._TemporaryFileWrapper:
+) -> str:
if dir is not None:
os.makedirs(dir, exist_ok=True)
headers = {"Authorization": "Bearer " + hf_token} if hf_token else {}
- prefix = Path(url_path).stem
- suffix = Path(url_path).suffix
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- prefix=prefix,
- suffix=suffix,
- dir=dir,
- )
+ directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
+ directory.mkdir(exist_ok=True, parents=True)
+ file_path = directory / Path(url_path).name
+
with requests.get(url_path, headers=headers, stream=True) as r, open(
- file_obj.name, "wb"
+ file_path, "wb"
) as f:
shutil.copyfileobj(r.raw, f)
- return file_obj
+ return str(file_path.resolve())
-def create_tmp_copy_of_file(
- file_path: str, dir: str | None = None
-) -> tempfile._TemporaryFileWrapper:
- if dir is not None:
- os.makedirs(dir, exist_ok=True)
- prefix = Path(file_path).stem
- suffix = Path(file_path).suffix
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- prefix=prefix,
- suffix=suffix,
- dir=dir,
- )
- shutil.copy2(file_path, file_obj.name)
- return file_obj
+def create_tmp_copy_of_file(file_path: str, dir: str | None = None) -> str:
+ directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
+ directory.mkdir(exist_ok=True, parents=True)
+ dest = directory / Path(file_path).name
+ shutil.copy2(file_path, dest)
+ return str(dest.resolve())
def get_mimetype(filename: str) -> str | None:
| diff --git a/client/python/test/test_utils.py b/client/python/test/test_utils.py
--- a/client/python/test/test_utils.py
+++ b/client/python/test/test_utils.py
@@ -62,7 +62,7 @@ def test_download_private_file():
url_path = "https://gradio-tests-not-actually-private-space.hf.space/file=lion.jpg"
hf_token = "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" # Intentionally revealing this key for testing purposes
file = utils.download_tmp_copy_of_file(url_path=url_path, hf_token=hf_token)
- assert file.name.endswith(".jpg")
+ assert Path(file).name.endswith(".jpg")
@pytest.mark.parametrize(
diff --git a/test/test_examples.py b/test/test_examples.py
--- a/test/test_examples.py
+++ b/test/test_examples.py
@@ -1,5 +1,6 @@
import os
import tempfile
+from pathlib import Path
from unittest.mock import patch
import pytest
@@ -372,3 +373,27 @@ async def test_multiple_file_flagging(tmp_path):
assert len(prediction[0]) == 2
assert all(isinstance(d, dict) for d in prediction[0])
+
+
[email protected]
+async def test_examples_keep_all_suffixes(tmp_path):
+ with patch("gradio.helpers.CACHED_FOLDER", str(tmp_path)):
+ file_1 = tmp_path / "foo.bar.txt"
+ file_1.write_text("file 1")
+ file_2 = tmp_path / "file_2"
+ file_2.mkdir(parents=True)
+ file_2 = file_2 / "foo.bar.txt"
+ file_2.write_text("file 2")
+ io = gr.Interface(
+ fn=lambda x: x.name,
+ inputs=gr.File(),
+ outputs=[gr.File()],
+ examples=[[str(file_1)], [str(file_2)]],
+ cache_examples=True,
+ )
+ prediction = await io.examples_handler.load_from_cache(0)
+ assert Path(prediction[0]["name"]).read_text() == "file 1"
+ assert prediction[0]["orig_name"] == "foo.bar.txt"
+ prediction = await io.examples_handler.load_from_cache(1)
+ assert Path(prediction[0]["name"]).read_text() == "file 2"
+ assert prediction[0]["orig_name"] == "foo.bar.txt"
| Examples bug when filename has two periods
### Describe the bug
Just observed that when attempting to add an example whose filename has two periods, e.g., `test_data.nii.gz`, the updated filename after the example click event has a corrupted file extension.
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Reproduction
There is likely a much simpler way to reproduce it, but below is an example that somewhat mimics my application. I am trying to link the output of the Example click event to a `gr.File()` widget.
```
import gradio as gr
with gr.Blocks() as demo:
file_output = gr.File(file_count="single")
file_output.upload(upload_func(), file_output, file_output)
run_button = gr.Button("Run analysis")
run_button.click(fn=run_func(), inputs=file_output, outputs=[...])
gr.Examples(examples=["test_data.nii.gz"], inputs=[...], outputs=[...],fn=upload_func(), cache_examples=True)
```
### Screenshot
See updated filename after attempting to use the Example with extension `.nii.gz`.

### Logs
```shell
Not necessary.
```
### System Info
```shell
gradio version: 3.32.0
Browser: Google Chrome
Operating system: Ubuntu Linux 20.04
gradio app deployed through Docker on Hugging Face spaces
The [neukit](https://huggingface.co/spaces/andreped/neukit) gradio app itself can be accessed [here](https://huggingface.co/spaces/andreped/neukit).
```
### Severity
serious, but I can work around it
| Hi @andreped !
Is there anything in the logs or browser console about what caused the problem?
I tried reproducing with the following demo (its `demo/video_component/run.py` in the repo with some modifications)
```python
import gradio as gr
import os
a = os.path.join(os.path.dirname(__file__), "files/world.mp4") # Video
b = os.path.join(os.path.dirname(__file__), "files/a.mp4") # Video
c = os.path.join(os.path.dirname(__file__), "files/b.mp4") # Video
d = os.path.join(os.path.dirname(__file__), "files/a.foo.mp4") # Video
demo = gr.Interface(
fn=lambda x: (x, x),
inputs=gr.File(type="file"),
outputs=[gr.Video(), gr.File()],
examples=[[a], [b], [c], [d]],
cache_examples=True,
)
if __name__ == "__main__":
demo.launch()
```
Clicking on the example of the file with two periods in the name works and the video is playable. I wonder if you can provide a reproducer of the problem you're seeing?

@freddyaboulton @andreped it looks like this issue happens because we keep only the final suffix as the file extension for the temporary copy of the file that we create. In almost every case, this is the right thing to do, as filenames can have multiple periods (".") in the filename, but only the final suffix corresponds to the file extension. For example, if the filename is "a.foo.mp4", the file extension should be ".mp4".
However, it looks like there are some file extensions (like `.nii.gz` or `tar.gz`) where this leads to the file becoming corrupted, or at least not being recognized as the correct filetype. I think the safer approach is actually to keep *all* of the suffixes. So for example:
"a.foo.mp4" --> "temp_name.foo.mp4"
"a.nii.gz" --> "temp_name.nii.gz"
> I think the safer approach is actually to keep all of the suffixes.
@abidlabs Thanks for the update. I guess this is something that will need to be fixed inside gradio. For now I only support files in *.nii format, which is fine as a temporary workaround. | 2023-06-07T00:33:21 |
gradio-app/gradio | 4,453 | gradio-app__gradio-4453 | [
"1349"
] | 6888e30e7932512d7766b9a5137f567bd0a2e136 | diff --git a/gradio/components.py b/gradio/components.py
--- a/gradio/components.py
+++ b/gradio/components.py
@@ -2172,6 +2172,7 @@ def __init__(
elem_classes: list[str] | str | None = None,
mirror_webcam: bool = True,
include_audio: bool | None = None,
+ autoplay: bool = False,
**kwargs,
):
"""
@@ -2195,6 +2196,7 @@ def __init__(
include_audio: Whether the component should record/retain the audio track for a video. By default, audio is excluded for webcam videos and included for uploaded videos.
"""
self.format = format
+ self.autoplay = autoplay
valid_sources = ["upload", "webcam"]
if source not in valid_sources:
raise ValueError(
@@ -2231,6 +2233,7 @@ def get_config(self):
"width": self.width,
"mirror_webcam": self.mirror_webcam,
"include_audio": self.include_audio,
+ "autoplay": self.autoplay,
**IOComponent.get_config(self),
}
@@ -2250,6 +2253,7 @@ def update(
min_width: int | None = None,
interactive: bool | None = None,
visible: bool | None = None,
+ autoplay: bool | None = None,
):
return {
"source": source,
@@ -2263,6 +2267,7 @@ def update(
"interactive": interactive,
"visible": visible,
"value": value,
+ "autoplay": autoplay,
"__type__": "update",
}
@@ -2518,6 +2523,7 @@ def __init__(
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
format: Literal["wav", "mp3"] = "wav",
+ autoplay: bool = False,
**kwargs,
):
"""
@@ -2572,12 +2578,14 @@ def __init__(
)
TokenInterpretable.__init__(self)
self.format = format
+ self.autoplay = autoplay
def get_config(self):
return {
"source": self.source,
"value": self.value,
"streaming": self.streaming,
+ "autoplay": self.autoplay,
**IOComponent.get_config(self),
}
@@ -2598,6 +2606,7 @@ def update(
min_width: int | None = None,
interactive: bool | None = None,
visible: bool | None = None,
+ autoplay: bool | None = None,
):
return {
"source": source,
@@ -2609,6 +2618,7 @@ def update(
"interactive": interactive,
"visible": visible,
"value": value,
+ "autoplay": autoplay,
"__type__": "update",
}
| diff --git a/test/test_blocks.py b/test/test_blocks.py
--- a/test/test_blocks.py
+++ b/test/test_blocks.py
@@ -1154,6 +1154,7 @@ def test_with_generic_update(self):
}
)
assert specific_update == {
+ "autoplay": None,
"source": None,
"label": None,
"show_label": None,
diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -822,6 +822,7 @@ def test_component_functions(self):
audio_input = gr.Audio(label="Upload Your Audio")
assert audio_input.get_config() == {
+ "autoplay": False,
"source": "upload",
"name": "audio",
"streaming": False,
@@ -856,6 +857,7 @@ def test_component_functions(self):
audio_output = gr.Audio(type="filepath")
assert filecmp.cmp(y_audio.name, audio_output.postprocess(y_audio.name)["name"])
assert audio_output.get_config() == {
+ "autoplay": False,
"name": "audio",
"streaming": False,
"show_label": True,
@@ -1306,6 +1308,7 @@ def test_component_functions(self):
video_input = gr.Video(label="Upload Your Video")
assert video_input.get_config() == {
+ "autoplay": False,
"source": "upload",
"name": "video",
"show_label": True,
| Add option for `autoplay` in Audio component
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
I'd like to be able to auto play a sound when it comes back from the inference.
**Describe the solution you'd like**
A new attribute, `False` by default, in the `Audio` component to enable/disable autoplay of the HTML audio element.
| Seems reasonable to me. I think this would make sense as a parameter in the "style()" function of the "Audio()" component.
Auto-playing audio is not allowed in Chrome, although it works in other browsers. Auto-playing videos work in all browsers as long as the video is muted.
There are some workarounds but they are pretty heavy and can be inconsistent.
Will see if we can find any other options here, since there have already been user interactions.
I would recommend against using any workaround to enforce autoplay; use the HTML `autoplay` attribute instead and just let the browsers do what they will do. Audio and video autoplay can cause accessibility issues, especially for users who are using screen readers. If you use `autoplay` and ensure there is a way for users to pause audio and video, it shouldn't cause any a11y issues.
http://www.w3.org/TR/WCAG20-TECHS/F93.html
In most cases, it is slightly different from normal autoplaying videos. It is similar to a user uploading a video via a file input and that video then autoplaying (as opposed to autoplaying on page load), but the main issue still stands. We would need to focus an appropriate part of the screen (possibly the video element itself).
On balance, I think autoplaying with the video muted (chrome default) is probably the best approach if we decide to support this.
> I think this would make sense as a parameter in the "style()" function of the "Audio()" component.
I don't agree with this, though; this isn't a stylistic choice but changes the behaviour of the UI. It should be a kwarg and updatable via `gr.Update` imo.
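For reference, the diff at the top of this record adds an `autoplay` kwarg to the `Audio` and `Video` constructors. A minimal sketch of how that kwarg would be used on an output component (illustrative only; whether playback actually starts is still subject to each browser's autoplay policy):
```python
import numpy as np
import gradio as gr

def tone(freq):
    # return one second of a sine wave as (sample_rate, samples)
    sr = 16000
    t = np.linspace(0, 1, sr, endpoint=False)
    return sr, (0.5 * np.sin(2 * np.pi * float(freq) * t)).astype(np.float32)

demo = gr.Interface(fn=tone, inputs=gr.Number(value=440), outputs=gr.Audio(autoplay=True))

if __name__ == "__main__":
    demo.launch()
```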
I needed this for a small internal demo, where we wanted to output text-to-speech without the user pressing an additional button. My workaround looks like this:
```python
audio_el = gr.Audio(type="numpy", elem_id="speaker")
autoplay_audio = """async () => {
console.log('playing audio in 2 seconds')
let gradioEl = document.querySelector('body > gradio-app').shadowRoot;
setTimeout(() => {
let audioplayer = gradioEl.querySelector('#speaker > audio');
audioplayer.play();
}, 2000)
}"""
transcription.change(fn=update_audio, inputs=state, outputs=audio_el, _js=autoplay_audio)
```
It works fine, but obviously the 2-second delay is hard-coded because this is approximately how long our system needs to display the audio player. Is there a way to get the state of the audio component?
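(Not from the original thread, just a sketch of one way to avoid the hard-coded delay: poll for the audio element and check its `readyState` instead of sleeping for a fixed two seconds. The selector is copied from the snippet above and may need adjusting for newer Gradio versions.)
```python
autoplay_audio = """async () => {
    let gradioEl = document.querySelector('body > gradio-app').shadowRoot;
    let timer = setInterval(() => {
        let audioplayer = gradioEl.querySelector('#speaker > audio');
        // readyState >= 2 (HAVE_CURRENT_DATA) means there is something to play
        if (audioplayer && audioplayer.readyState >= 2) {
            clearInterval(timer);
            audioplayer.play();
        }
    }, 250);
}"""
```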
> I needed this for a small internal demo, where we wanted to output text-to-speech without the user pressing an additional button. My workaround looks like this:
>
> ```python
> audio_el = gr.Audio(type="numpy", elem_id="speaker")
> autoplay_audio = """async () => {
> console.log('playing audio in 2 seconds')
> let gradioEl = document.querySelector('body > gradio-app').shadowRoot;
> setTimeout(() => {
> let audioplayer = gradioEl.querySelector('#speaker > audio');
> audioplayer.play();
> }, 2000)
> }"""
> transcription.change(fn=update_audio, inputs=state, outputs=audio_el, _js=autoplay_audio)
> ```
>
> It works fine, but obviously the 2 seconds delay are hard coded because this is how long our system approximately needs to display the audio player. Is there a way to get the state of the audio component?
I'm in the same situation, trying to showcase an AI chat demo with a TTS feature and no user interaction.
Agreed. It would be great to enable this, as well as https://github.com/gradio-app/gradio/discussions/3316. cc @aliabid94
@pi43r and/or @Arcadia822 it sounds like you got a workaround running? @pi43r thank you for the code snippet. I unfortunately am having trouble getting it to work. I either end up with a `None` input, or I just don't see any of the javascript running. Did you have to configure your browser, or use a specific browser, to get it working? Can you describe what `update_audio` and `state` is in that code so I can understand what is feeding into `audio_el`?
> @pi43r and/or @Arcadia822 it sounds like you got a workaround running? @pi43r thank you for the code snippet. I unfortunately am having trouble getting it to work. I either end up with a `None` input, or I just don't see any of the javascript running. Did you have to configure your browser, or use a specific browser, to get it working? Can you describe what `update_audio` and `state` is in that code so I can understand what is feeding into `audio_el`?
It's been a while since I wrote the code and gradio might have changed. It should work in any browser (maybe not in safari because apple is different).
`update_audio` in our case was a simple function that generates the audio and converts it to a numpy array, then places it in the `state` list.
https://gradio.app/interface-state/
I might be able to make a minimal reproduction in the future on huggingface...
Strictly as a quick workaround, when I launched the interface of a chatbot I was playing with, I set debug=True and the audio played as the text was returned.
An audio player box popped up below the gradio input/output boxes, but the audio played automatically.
I also suppressed all warnings, to keep them from displaying.
@robjm16
> Strictly as a quick workaround, when I launched the interface of a chatbot I was playing with, I set debug=True and the audio played as the text was returned.
Thanks for the tip. I tried `debug=True` in both the launch function and in the audio block. No luck on my end. Though it may be because I'm using blocks instead? Where did you place your `debug=True`?
In the launch function.
I was trying to get this voice-enabled chatbot to work (code below). Wasn't using gradio blocks.
Most of the code taken from:
https://github.com/hackingthemarkets/chatgpt-api-whisper-api-voice-assistant/blob/main/therapist.py
```python
import openai
import gradio as gr
import gtts
from playsound import playsound
from gtts import gTTS  # Import Google Text to Speech
from IPython.display import Audio, display  # Import Audio and display from IPython's Display module
import warnings
warnings.filterwarnings("ignore")

openai.api_key = 'YOUR KEY HERE'

# Create list of messages, starting with initial message to the system
messages = [{"role": "system", "content": 'You are a therapist. Respond to all input in 25 words or less.'}]


def transcribe(audio):
    """
    Transcribes the user's audio input using the OpenAI API,
    generates a response from the chatbot using GPT-3, converts the response into
    speech using the gTTS library, updates the conversation history, and returns
    the updated conversation history as a string.

    Parameters:
    audio (str): The filepath of the audio file containing the user's input.

    Returns:
    str: A string containing the updated conversation history, with each message formatted as "role: content" and separated by two newlines.
    """
    # Declare messages a global variable (not local to the function)
    global messages

    # Get user's audio, transcribe it and append it to messages
    audio_file = open(audio, "rb")  # "open" is a built-in Python command
    transcript = openai.Audio.transcribe("whisper-1", audio_file)
    messages.append({"role": "user", "content": transcript["text"]})

    # Get the therapist's response, append to messages
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # Create audio from therapist's text response
    msg = system_message["content"]
    # print(msg)  # For validation
    talk_file = make_into_speech(msg)
    display(Audio(talk_file, autoplay=True))

    # Update the rolling chat transcript
    chat_transcript = ""
    for message in messages:
        if message['role'] != 'system':
            chat_transcript += message['role'] + ": " + message['content'] + "\n\n"

    return chat_transcript


def make_into_speech(words):
    """
    Takes a string as input, converts it to speech using the gTTS library,
    saves the speech as a WAV file, and returns the filepath of the saved WAV file.

    Parameters:
    - `words` (str): The input string to convert to speech.

    Returns:
    - `sound_file` (str): The filepath of the saved WAV file.

    Example:
    >>> make_into_speech('Hello, how are you today?')
    '2.wav'

    The function converts the input string to speech and returns the filepath of the saved WAV file.
    """
    tts = gTTS(words)  # Provide the string to convert to speech
    tts.save('2.wav')  # Save the string converted to speech as a .wav file
    sound_file = '2.wav'
    return sound_file


# Launch the interface
ui = gr.Interface(fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath", label="Record Here"), outputs=[gr.Text(label="Chat Transcript")])
ui.launch(debug=True)
```
Thanks again for the additional info. I was able to get it to work for my use case using `_js` as described above, but with a slightly different javascript string:
https://github.com/tszumowski/vocaltales_storyteller_chatbot/blob/46d46799ff8cdff2f016e484b0ade0e14cb12f8a/storyteller.py#L236-L242
```
autoplay_audio = """
async () => {{
setTimeout(() => {{
document.querySelector('#speaker audio').play();
}}, {speech_delay});
}}
"""
```
I now use this method to add autoplay, which can be referred to below:
```
import gradio as gr
from gtts import gTTS
from io import BytesIO
import base64
def text_to_speech(text):
tts = gTTS(text)
tts.save('hello_world.mp3')
audio_bytes = BytesIO()
tts.write_to_fp(audio_bytes)
audio_bytes.seek(0)
audio = base64.b64encode(audio_bytes.read()).decode("utf-8")
audio_player = f'<audio src="data:audio/mpeg;base64,{audio}" controls autoplay></audio>'
return audio_player
with gr.Blocks() as demo:
html = gr.HTML()
# html.visible = False
text = gr.Text()
btn = gr.Button("OK")
btn.click(text_to_speech, inputs=[text], outputs=[html])
demo.launch()
```
Thanks. This worked for me.
> I now use this method to add autoplay, which can be referred to below:
>
> ```
> import gradio as gr
> from gtts import gTTS
> from io import BytesIO
> import base64
>
>
> def text_to_speech(text):
> tts = gTTS(text)
> tts.save('hello_world.mp3')
>
> audio_bytes = BytesIO()
> tts.write_to_fp(audio_bytes)
> audio_bytes.seek(0)
>
> audio = base64.b64encode(audio_bytes.read()).decode("utf-8")
> audio_player = f'<audio src="data:audio/mpeg;base64,{audio}" controls autoplay></audio>'
>
> return audio_player
>
>
> with gr.Blocks() as demo:
> html = gr.HTML()
> # html.visible = False
>
> text = gr.Text()
> btn = gr.Button("OK")
> btn.click(text_to_speech, inputs=[text], outputs=[html])
>
> demo.launch()
> ```
Thank you! It works!
and I am using .wav, so I got this:
```python
def audio_to_html(audio):
audio_bytes = BytesIO()
wavio.write(audio_bytes, audio[1].astype(np.float32), audio[0], sampwidth=4)
audio_bytes.seek(0)
audio_base64 = base64.b64encode(audio_bytes.read()).decode("utf-8")
audio_player = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'
return audio_player
```
and adding:
```python
import wavio
```
Thanks, the above solution worked for me. For a more beginner-friendly description of what's going on:
- You have audio; consider it to be in bytes (or convert it to bytes)
- Convert it to a BytesIO object using `audio_io = BytesIO(audio_in_bytes)` (Library: `from io import BytesIO`)
- Point it to the beginning of the audio file using `audio_io.seek(0)`
- Convert the audio_io to base64 to be used in the HTML command using `audio_base64 = base64.b64encode(audio_io.read()).decode("utf-8")` (Library: `import base64`)
- Finally, write the HTML command as a string that invokes autoplay for your audio: `audio_html = f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'`

Use the `audio_html` as your output in gradio to auto-play that converted audio. This can be altered based on your usage; a consolidated sketch of these steps follows below.
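Putting those steps together, a small helper along these lines (a sketch that assumes the audio is already encoded, e.g. as MP3 or WAV bytes); return its result to a `gr.HTML` output as in the examples above:
```python
import base64
from io import BytesIO

def bytes_to_autoplay_html(audio_in_bytes: bytes) -> str:
    audio_io = BytesIO(audio_in_bytes)  # wrap the raw bytes
    audio_io.seek(0)                    # point to the beginning
    audio_base64 = base64.b64encode(audio_io.read()).decode("utf-8")
    # embed as a data URI in an <audio> tag with autoplay enabled
    return f'<audio src="data:audio/mpeg;base64,{audio_base64}" controls autoplay></audio>'
```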
@CsqTom
> I now use this method to add autoplay, which can be referred to below:
Thank you! Your code worked perfectly when I [integrated it into my codebase](https://github.com/tszumowski/vocaltales_storyteller_chatbot/pull/8), and cleaned it up a lot too! I appreciate the reproducible example you provided as well!
| 2023-06-08T13:53:57 |
gradio-app/gradio | 4,705 | gradio-app__gradio-4705 | [
"4707"
] | 58c6d68f20798cf47a2368de378e20e7a7ad116b | diff --git a/demo/audio_component/run.py b/demo/audio_component/run.py
--- a/demo/audio_component/run.py
+++ b/demo/audio_component/run.py
@@ -5,4 +5,4 @@
with gr.Blocks(css=css) as demo:
gr.Audio()
-demo.launch()
+demo.launch()
\ No newline at end of file
| diff --git a/js/app/src/components/Audio/Audio.test.ts b/js/app/src/components/Audio/Audio.test.ts
--- a/js/app/src/components/Audio/Audio.test.ts
+++ b/js/app/src/components/Audio/Audio.test.ts
@@ -1,7 +1,9 @@
-import { test, describe, assert, afterEach, vi } from "vitest";
-import { cleanup, render, wait_for_event } from "@gradio/tootils";
+import { test, describe, assert, afterEach, vi, beforeAll } from "vitest";
+import { spy, spyOn } from "tinyspy";
+import { cleanup, render, wait_for_event, wait } from "@gradio/tootils";
import event from "@testing-library/user-event";
import { setupi18n } from "../../i18n";
+import { tick } from "svelte";
import Audio from "./Audio.svelte";
import type { LoadingStatus } from "../StatusTracker/types";
@@ -18,10 +20,14 @@ const loading_status = {
};
describe("Audio", () => {
+ beforeAll(() => {
+ window.HTMLMediaElement.prototype.play = vi.fn();
+ window.HTMLMediaElement.prototype.pause = vi.fn();
+ });
afterEach(() => cleanup());
test("renders provided value and label", async () => {
- const { getByTestId, queryAllByText } = render(Audio, {
+ const { getByTestId, queryAllByText } = await render(Audio, {
show_label: true,
loading_status,
mode: "dynamic",
@@ -40,7 +46,7 @@ describe("Audio", () => {
});
assert.isTrue(
- getByTestId("Audio Component-dynamic-audio").src.endsWith(
+ getByTestId("Audio Component-audio").src.endsWith(
"foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
)
);
@@ -48,7 +54,7 @@ describe("Audio", () => {
});
test("hides label", async () => {
- const { queryAllByText } = render(Audio, {
+ const { queryAllByText } = await render(Audio, {
show_label: false,
loading_status,
mode: "dynamic",
@@ -71,7 +77,7 @@ describe("Audio", () => {
test("upload sets change event", async () => {
setupi18n();
- const { container, component } = render(Audio, {
+ const { container, component } = await render(Audio, {
show_label: false,
loading_status,
value: null,
@@ -97,7 +103,7 @@ describe("Audio", () => {
});
test("static audio sets value", async () => {
- const { getByTestId } = render(Audio, {
+ const { getByTestId } = await render(Audio, {
show_label: true,
loading_status,
mode: "static",
@@ -116,7 +122,7 @@ describe("Audio", () => {
});
assert.isTrue(
- getByTestId("Audio Component-static-audio").src.endsWith(
+ getByTestId("Audio Component-audio").src.endsWith(
"foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
)
);
@@ -158,7 +164,7 @@ describe("Audio", () => {
vi.stubGlobal("navigator", media_mock);
vi.stubGlobal("MediaRecorder", media_recorder_mock);
- const { component, getByText } = render(Audio, {
+ const { component, getByText } = await render(Audio, {
show_label: true,
loading_status,
mode: "dynamic",
@@ -168,7 +174,8 @@ describe("Audio", () => {
root_url: null,
streaming: false,
pending: false,
- source: "microphone"
+ source: "microphone",
+ name: "bar"
});
const startButton = getByText("Record from microphone");
@@ -184,4 +191,130 @@ describe("Audio", () => {
assert.equal(component.$capture_state().value.name, "audio.wav");
assert.equal(mock.callCount, 1);
});
+
+ test("when autoplay is true `media.play` should be called in static mode", async () => {
+ const { getByTestId } = await render(Audio, {
+ show_label: true,
+ loading_status,
+ mode: "static",
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ },
+ label: "static",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+
+ const startButton = getByTestId<HTMLAudioElement>("static-audio");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ assert.equal(fn.callCount, 1);
+ });
+
+ test("when autoplay is true `media.play` should be called in dynamic mode", async () => {
+ const { getByTestId } = await render(Audio, {
+ show_label: true,
+ loading_status,
+ mode: "dynamic",
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ },
+ label: "dynamic",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+
+ const startButton = getByTestId<HTMLAudioElement>("dynamic-audio");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ assert.equal(fn.callCount, 1);
+ });
+
+ test("when autoplay is true `media.play` should be called in static mode when the audio data is updated", async () => {
+ const { component, getByTestId } = await render(Audio, {
+ show_label: true,
+ loading_status,
+ mode: "static",
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ },
+ label: "static",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+
+ const startButton = getByTestId<HTMLAudioElement>("static-audio");
+ const fn = spyOn(startButton, "play");
+
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ component.$set({
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ });
+
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ assert.equal(fn.callCount, 2);
+ });
+
+ test("when autoplay is true `media.play` should be called in dynamic mode when the audio data is updated", async () => {
+ const { component, getByTestId } = await render(Audio, {
+ show_label: true,
+ loading_status,
+ mode: "dynamic",
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ },
+ label: "dynamic",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+
+ const startButton = getByTestId<HTMLAudioElement>("dynamic-audio");
+ const fn = spyOn(startButton, "play");
+
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ component.$set({
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ });
+
+ startButton.dispatchEvent(new Event("loadeddata"));
+
+ assert.equal(fn.callCount, 2);
+ });
});
diff --git a/js/app/src/components/Button/Button.test.ts b/js/app/src/components/Button/Button.test.ts
--- a/js/app/src/components/Button/Button.test.ts
+++ b/js/app/src/components/Button/Button.test.ts
@@ -7,13 +7,17 @@ import Button from "./Button.svelte";
describe("Hello.svelte", () => {
afterEach(() => cleanup());
- test.skip("renders label text", () => {
- const { container, component } = render(Button, { value: "Click Me" });
+ test.skip("renders label text", async () => {
+ const { container, component } = await render(Button, {
+ value: "Click Me"
+ });
assert.equal(container.innerText, "Click Me");
});
test.skip("triggers callback when clicked", async () => {
- const { container, component } = render(Button, { value: "Click Me" });
+ const { container, component } = await render(Button, {
+ value: "Click Me"
+ });
const mock = spy();
component.$on("click", mock);
diff --git a/js/app/src/components/Chatbot/Chatbot.test.ts b/js/app/src/components/Chatbot/Chatbot.test.ts
--- a/js/app/src/components/Chatbot/Chatbot.test.ts
+++ b/js/app/src/components/Chatbot/Chatbot.test.ts
@@ -19,7 +19,7 @@ describe.skip("Chatbot", () => {
afterEach(() => cleanup());
test("renders user and bot messages", async () => {
- const { getAllByTestId } = render(Chatbot, {
+ const { getAllByTestId } = await render(Chatbot, {
loading_status,
label: "hello",
value: [["user message one", "bot message one"]],
@@ -37,7 +37,7 @@ describe.skip("Chatbot", () => {
});
test("renders additional message as they are passed", async () => {
- const { component, getAllByTestId } = render(Chatbot, {
+ const { component, getAllByTestId } = await render(Chatbot, {
loading_status,
label: "hello",
value: [["user message one", "bot message one"]],
diff --git a/js/app/src/components/ColorPicker/ColorPicker.test.ts b/js/app/src/components/ColorPicker/ColorPicker.test.ts
--- a/js/app/src/components/ColorPicker/ColorPicker.test.ts
+++ b/js/app/src/components/ColorPicker/ColorPicker.test.ts
@@ -17,8 +17,8 @@ const loading_status = {
describe("ColorPicker", () => {
afterEach(() => cleanup());
- test("renders provided value", () => {
- const { getByDisplayValue } = render(ColorPicker, {
+ test("renders provided value", async () => {
+ const { getByDisplayValue } = await render(ColorPicker, {
loading_status,
show_label: true,
mode: "dynamic",
@@ -31,7 +31,7 @@ describe("ColorPicker", () => {
});
test("changing the color should update the value", async () => {
- const { component, getByDisplayValue } = render(ColorPicker, {
+ const { component, getByDisplayValue } = await render(ColorPicker, {
loading_status,
show_label: true,
mode: "dynamic",
diff --git a/js/app/src/components/Gallery/Gallery.test.ts b/js/app/src/components/Gallery/Gallery.test.ts
--- a/js/app/src/components/Gallery/Gallery.test.ts
+++ b/js/app/src/components/Gallery/Gallery.test.ts
@@ -4,14 +4,15 @@ import { cleanup, render } from "@gradio/tootils";
import Gallery from "./Gallery.svelte";
import type { LoadingStatus } from "../StatusTracker/types";
-const loading_status = {
+const loading_status: LoadingStatus = {
eta: 0,
queue_position: 1,
queue_size: 1,
- status: "complete" as LoadingStatus["status"],
+ status: "complete",
scroll_to_output: false,
visible: true,
- fn_index: 0
+ fn_index: 0,
+ show_progress: "full"
};
describe("Gallery", () => {
@@ -21,7 +22,9 @@ describe("Gallery", () => {
});
test("preview shows detailed image by default", async () => {
- const { getAllByTestId, getByTestId } = render(Gallery, {
+ window.Element.prototype.scrollTo = vi.fn(() => {});
+
+ const { getAllByTestId, getByTestId } = await render(Gallery, {
loading_status,
label: "gallery",
// @ts-ignore
@@ -51,14 +54,12 @@ describe("Gallery", () => {
});
const details = getAllByTestId("detailed-image");
- const container = getByTestId("container_el");
- container.scrollTo = () => {};
assert.equal(details.length, 1);
});
test("detailed view does not show larger image", async () => {
- const { queryAllByTestId, getByTestId } = render(Gallery, {
+ const { queryAllByTestId, getByTestId } = await render(Gallery, {
loading_status,
label: "gallery",
// @ts-ignore
diff --git a/js/app/src/components/Radio/Radio.test.ts b/js/app/src/components/Radio/Radio.test.ts
--- a/js/app/src/components/Radio/Radio.test.ts
+++ b/js/app/src/components/Radio/Radio.test.ts
@@ -21,8 +21,8 @@ describe("Radio", () => {
afterEach(() => cleanup());
const choices = ["dog", "cat", "turtle"];
- test("renders provided value", () => {
- const { getAllByRole, getByTestId } = render(Radio, {
+ test("renders provided value", async () => {
+ const { getAllByRole, getByTestId } = await render(Radio, {
show_label: true,
loading_status,
choices: choices,
@@ -46,7 +46,7 @@ describe("Radio", () => {
});
test("should update the value when a radio is clicked", async () => {
- const { getByDisplayValue, getByTestId } = render(Radio, {
+ const { getByDisplayValue, getByTestId } = await render(Radio, {
show_label: true,
loading_status,
choices: choices,
diff --git a/js/app/src/components/Textbox/Textbox.test.ts b/js/app/src/components/Textbox/Textbox.test.ts
--- a/js/app/src/components/Textbox/Textbox.test.ts
+++ b/js/app/src/components/Textbox/Textbox.test.ts
@@ -19,8 +19,8 @@ const loading_status = {
describe("Textbox", () => {
afterEach(() => cleanup());
- test("renders provided value", () => {
- const { getByDisplayValue } = render(Textbox, {
+ test("renders provided value", async () => {
+ const { getByDisplayValue } = await render(Textbox, {
show_label: true,
max_lines: 1,
loading_status,
@@ -35,7 +35,7 @@ describe("Textbox", () => {
});
test("changing the text should update the value", async () => {
- const { component, getByDisplayValue } = render(Textbox, {
+ const { component, getByDisplayValue } = await render(Textbox, {
show_label: true,
max_lines: 10,
loading_status,
diff --git a/js/app/src/components/Video/Video.test.ts b/js/app/src/components/Video/Video.test.ts
new file mode 100644
--- /dev/null
+++ b/js/app/src/components/Video/Video.test.ts
@@ -0,0 +1,251 @@
+import {
+ test,
+ describe,
+ assert,
+ afterEach,
+ vi,
+ beforeAll,
+ beforeEach,
+ expect
+} from "vitest";
+import { spyOn } from "tinyspy";
+import { cleanup, render } from "@gradio/tootils";
+import { setupi18n } from "../../i18n";
+
+import Video from "./Video.svelte";
+import type { LoadingStatus } from "../StatusTracker/types";
+
+const loading_status = {
+ eta: 0,
+ queue_position: 1,
+ queue_size: 1,
+ status: "complete" as LoadingStatus["status"],
+ scroll_to_output: false,
+ visible: true,
+ fn_index: 0,
+ show_progress: "full" as LoadingStatus["show_progress"]
+};
+
+describe("Video", () => {
+ beforeAll(() => {
+ window.HTMLMediaElement.prototype.play = vi.fn();
+ window.HTMLMediaElement.prototype.pause = vi.fn();
+ });
+ beforeEach(setupi18n);
+ afterEach(() => cleanup());
+
+ test("renders provided value and label", async () => {
+ const { getByTestId, queryAllByText } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "dynamic",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ label: "Test Label",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ name: "bar",
+ source: "upload"
+ });
+ assert.isTrue(
+ getByTestId("Test Label-player").src.endsWith(
+ "foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
+ )
+ );
+ assert(queryAllByText("Test Label").length, 1);
+ });
+
+ test("hides label", async () => {
+ const { queryAllByText } = await render(Video, {
+ show_label: false,
+ loading_status,
+ mode: "dynamic",
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ },
+ label: "Video Component",
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ name: "bar",
+ source: "upload"
+ });
+ assert(queryAllByText("Video Component").length, 0);
+ });
+
+ test("static Video sets value", async () => {
+ const { getByTestId } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "static",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ name: "bar",
+ source: "upload"
+ });
+ assert.isTrue(
+ getByTestId("test-player").src.endsWith(
+ "foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
+ )
+ );
+ });
+
+ test("when autoplay is true `media.play` should be called in static mode", async () => {
+ const { getByTestId } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "static",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+ const startButton = getByTestId<HTMLAudioElement>("test-player");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+ assert.equal(fn.callCount, 1);
+ });
+
+ test("when autoplay is true `media.play` should be called in dynamic mode", async () => {
+ const { getByTestId } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "dynamic",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+ const startButton = getByTestId<HTMLAudioElement>("test-player");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+ assert.equal(fn.callCount, 1);
+ });
+
+ test("when autoplay is true `media.play` should be called in static mode when the Video data is updated", async () => {
+ const { component, getByTestId } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "static",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+ const startButton = getByTestId<HTMLAudioElement>("test-player");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+ component.$set({
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ });
+ startButton.dispatchEvent(new Event("loadeddata"));
+ assert.equal(fn.callCount, 2);
+ });
+
+ test("when autoplay is true `media.play` should be called in dynamic mode when the Video data is updated", async () => {
+ const { component, getByTestId } = await render(Video, {
+ show_label: true,
+ loading_status,
+ mode: "dynamic",
+ value: [
+ {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ ],
+ root: "foo",
+ root_url: null,
+ streaming: false,
+ pending: false,
+ source: "microphone",
+ autoplay: true
+ });
+ const startButton = getByTestId<HTMLAudioElement>("test-player");
+ const fn = spyOn(startButton, "play");
+ startButton.dispatchEvent(new Event("loadeddata"));
+ component.$set({
+ value: {
+ name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
+ data: null,
+ is_file: true
+ }
+ });
+ startButton.dispatchEvent(new Event("loadeddata"));
+ assert.equal(fn.callCount, 2);
+ });
+ test("renders video and download button", async () => {
+ const data = [
+ {
+ data: null,
+ name: "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/demo/video_component/files/a.mp4",
+ is_file: true
+ }
+ ];
+ const results = await render(Video, {
+ mode: "static",
+ label: "video",
+ show_label: true,
+ value: data,
+ root: "foo"
+ });
+
+ const downloadButton = results.getAllByTestId("download-div")[0];
+ expect(
+ downloadButton.getElementsByTagName("a")[0].getAttribute("href")
+ ).toBe(`foo/file=${data[0].name}`);
+ expect(
+ downloadButton.getElementsByTagName("button").length
+ ).toBeGreaterThan(0);
+ });
+});
diff --git a/js/form/src/Range.test.ts b/js/form/src/Range.test.ts
--- a/js/form/src/Range.test.ts
+++ b/js/form/src/Range.test.ts
@@ -6,8 +6,8 @@ import Range from "./Range.svelte";
describe("Range", () => {
afterEach(() => cleanup());
- test("Release event called on blur and pointerUp", () => {
- const results = render(Range, {
+ test("Release event called on blur and pointerUp", async () => {
+ const results = await render(Range, {
label: "range",
show_label: true,
value: 1,
diff --git a/js/video/src/StaticVideo.test.ts b/js/video/src/StaticVideo.test.ts
deleted file mode 100644
--- a/js/video/src/StaticVideo.test.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-import "@testing-library/jest-dom";
-
-import { test, describe, afterEach } from "vitest";
-import { cleanup, render } from "@gradio/tootils";
-
-import StaticVideo from "./StaticVideo.svelte";
-
-describe("StaticVideo", () => {
- afterEach(() => cleanup());
-
- const data = {
- data: "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/demo/video_component/files/a.mp4",
- name: "a.mp4"
- };
-
- test("renders video and download button", () => {
- const results = render(StaticVideo, {
- label: "video",
- show_label: true,
- value: data
- });
-
- //expect(results.getAllByLabelText("video")).not.toThrow();
-
- const downloadButton = results.getAllByTestId("download-div")[0];
- expect(
- downloadButton.getElementsByTagName("a")[0].getAttribute("href")
- ).toBe(data.data);
- expect(
- downloadButton.getElementsByTagName("button").length
- ).toBeGreaterThan(0);
-
- expect(downloadButton.getElementsByTagName("button")[0]).toBeVisible();
- });
-});
| Video Unit tests
@pngwn taking care of this in #4705
| 2023-06-28T10:27:11 |
|
gradio-app/gradio | 4,797 | gradio-app__gradio-4797 | [
"4795"
] | b091c9165e4f8269cfeaba08741f333795b9a32f | diff --git a/gradio/blocks.py b/gradio/blocks.py
--- a/gradio/blocks.py
+++ b/gradio/blocks.py
@@ -1957,13 +1957,8 @@ def reverse(text):
# Check if running in a Python notebook in which case, display inline
if inline is None:
- inline = utils.ipython_check() and (self.auth is None)
+ inline = utils.ipython_check()
if inline:
- if self.auth is not None:
- print(
- "Warning: authentication is not supported inline. Please"
- "click the link to access the interface in a new tab."
- )
try:
from IPython.display import HTML, Javascript, display # type: ignore
| diff --git a/js/app/test/components.test.ts b/js/app/test/components.test.ts
--- a/js/app/test/components.test.ts
+++ b/js/app/test/components.test.ts
@@ -75,7 +75,7 @@ const components = [
// ["Plot", Plot, {}],
["Radio", Radio, {}],
["Slider", Slider, {}],
- ["Textbox", Textbox, {}],
+ ["Textbox", Textbox, { container: false }],
["TimeSeries", TimeSeries, {}],
["UploadButton", UploadButton, {}],
["Video", Video, {}]
| Login Page styling regressed
### Describe the bug
The login page styling looks weird. The gray boxes look bad and they are new. See this space: https://huggingface.co/spaces/gradio/hello_login
<img width="1166" alt="image" src="https://github.com/gradio-app/gradio/assets/41651716/7e8664d7-59b3-48a1-9e12-f755e3272fdd">
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
Go to this space https://huggingface.co/spaces/gradio/hello_login
Seems to be a regression introduced in the last release
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio 3.35.2
```
### Severity
I can work around it
| 2023-07-05T17:34:16 |
|
gradio-app/gradio | 4,886 | gradio-app__gradio-4886 | [
"4884"
] | 4bf301324b3b180fa32166ff1774312b01334c88 | diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py
--- a/client/python/gradio_client/client.py
+++ b/client/python/gradio_client/client.py
@@ -550,7 +550,7 @@ def _infer_fn_index(self, api_name: str | None, fn_index: int | None) -> int:
if api_name is not None:
for i, d in enumerate(self.config["dependencies"]):
config_api_name = d.get("api_name")
- if config_api_name is None:
+ if config_api_name is None or config_api_name is False:
continue
if "/" + config_api_name == api_name:
inferred_fn_index = i
| diff --git a/client/python/test/test_client.py b/client/python/test/test_client.py
--- a/client/python/test/test_client.py
+++ b/client/python/test/test_client.py
@@ -366,6 +366,11 @@ def greet(name):
finally:
server.thread.join(timeout=1)
+ def test_predict_with_space_with_api_name_false(self):
+ client = Client("gradio-tests/client-bool-api-name-error")
+ assert client.predict("Hello!", api_name="/run") == "Hello!"
+ assert client.predict("Freddy", api_name="/say_hello") == "hello"
+
class TestStatusUpdates:
@patch("gradio_client.client.Endpoint.make_end_to_end_fn")
| `Client` doesn't work properly when a Space has a method with `api_name=False`
### Describe the bug
Originally reported [here](https://huggingface.co/spaces/hysts/Shap-E/discussions/18).
If a Space has a method with `api_name=False`, the following error occurs when calling it with `Client`:
```
TypeError: can only concatenate str (not "bool") to str
```
I think this error occurs when calling a Space with `gr.Examples`.
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
When I use `Client` to call [this Space](https://huggingface.co/spaces/hysts-debug/client-bool-api-name-error)
```python
import gradio as gr
with gr.Blocks() as demo:
text = gr.Text()
out = gr.Text()
btn = gr.Button()
text.submit(fn=lambda x: x, inputs=text, outputs=out, api_name=False)
btn.click(fn=lambda x: x, inputs=text, outputs=out, api_name='run')
demo.queue().launch()
```
with the following code
```python
from gradio_client import Client
client = Client('hysts-debug/client-bool-api-name-error')
out = client.predict('aaa', api_name='/run')
```
I get the following error.
```
Traceback (most recent call last):
File "/tmp/temp/run.py", line 4, in <module>
out = client.predict('aaa', api_name='/run')
File "/home/ubuntu/.virtualenvs/py31011/lib/python3.10/site-packages/gradio_client/client.py", line 285, in predict
return self.submit(*args, api_name=api_name, fn_index=fn_index).result()
File "/home/ubuntu/.virtualenvs/py31011/lib/python3.10/site-packages/gradio_client/client.py", line 313, in submit
inferred_fn_index = self._infer_fn_index(api_name, fn_index)
File "/home/ubuntu/.virtualenvs/py31011/lib/python3.10/site-packages/gradio_client/client.py", line 555, in _infer_fn_index
if "/" + config_api_name == api_name:
TypeError: can only concatenate str (not "bool") to str
```
If I remove the method with `api_name=False`, the error doesn't occur.
In the case of the [Shap-E Space](https://huggingface.co/spaces/hysts/Shap-E), it doesn't have methods with `api_name=False` explicitly, but as it has `gr.Examples`, I guess its method has `api_name=False` implicitly, which is causing this error.
If I change the code of the Space to the following, the same error occurs.
```python
import gradio as gr
with gr.Blocks() as demo:
text = gr.Text()
out = gr.Text()
btn = gr.Button()
gr.Examples(examples=['aaa', 'bbb'], inputs=text)
btn.click(fn=lambda x: x, inputs=text, outputs=out, api_name='run')
demo.queue().launch()
```
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==3.36.1
gradio_client==0.2.8
```
### Severity
Blocking usage of gradio
| Hmm I thought we fixed this in the latest version of `gradio_client`, cc some discussion here: https://github.com/gradio-app/gradio/pull/4861
cc @freddyaboulton
It doesn't seem to be fixed in the `main` branch. I think this will be fixed if we replace [this line](https://github.com/gradio-app/gradio/blob/4bf301324b3b180fa32166ff1774312b01334c88/client/python/gradio_client/client.py#L553) with
```python
if not config_api_name:
``` | 2023-07-12T03:45:15 |
gradio-app/gradio | 4,904 | gradio-app__gradio-4904 | [
"4878"
] | d3e1d272d50245ce6f875bb6098113e44876b69e | diff --git a/gradio/components/audio.py b/gradio/components/audio.py
--- a/gradio/components/audio.py
+++ b/gradio/components/audio.py
@@ -194,7 +194,7 @@ def preprocess(
)
crop_min, crop_max = x.get("crop_min", 0), x.get("crop_max", 100)
if is_file:
- if utils.validate_url(file_name):
+ if client_utils.is_http_url_like(file_name):
temp_file_path = self.download_temp_copy_if_needed(file_name)
else:
temp_file_path = self.make_temp_copy_if_needed(file_name)
@@ -322,7 +322,7 @@ def postprocess(
"""
if y is None:
return None
- if isinstance(y, str) and utils.validate_url(y):
+ if isinstance(y, str) and client_utils.is_http_url_like(y):
return {"name": y, "data": None, "is_file": True}
if isinstance(y, tuple):
sample_rate, data = y
diff --git a/gradio/components/video.py b/gradio/components/video.py
--- a/gradio/components/video.py
+++ b/gradio/components/video.py
@@ -204,7 +204,11 @@ def preprocess(
if is_file:
assert file_name is not None, "Received file data without a file name."
- file_name = Path(self.make_temp_copy_if_needed(file_name))
+ if client_utils.is_http_url_like(file_name):
+ fn = self.download_temp_copy_if_needed
+ else:
+ fn = self.make_temp_copy_if_needed
+ file_name = Path(fn(file_name))
else:
assert file_data is not None, "Received empty file data."
file_name = Path(self.base64_to_temp_file_if_needed(file_data, file_name))
@@ -312,12 +316,14 @@ def _format_video(self, video: str | Path | None) -> FileData | None:
else:
conversion_needed = True
+ is_url = client_utils.is_http_url_like(video)
+
# For cases where the video is a URL and does not need to be converted to another format, we can just return the URL
- if utils.validate_url(video) and not (conversion_needed):
+ if is_url and not (conversion_needed):
return {"name": video, "data": None, "is_file": True}
# For cases where the video needs to be converted to another format
- if utils.validate_url(video):
+ if is_url:
video = self.download_temp_copy_if_needed(video)
if (
processing_utils.ffmpeg_installed()
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1499,6 +1499,19 @@ def test_video_preprocessing_flips_video_for_webcam(self, mock_ffmpeg):
assert ".avi" in list(output_params.keys())[0]
assert ".avi" in output_file
+ @pytest.mark.flaky
+ def test_preprocess_url(self):
+ output = gr.Video().preprocess(
+ {
+ "name": "https://gradio-builds.s3.amazonaws.com/demo-files/a.mp4",
+ "is_file": True,
+ "data": None,
+ "size": None,
+ "orig_name": "https://gradio-builds.s3.amazonaws.com/demo-files/a.mp4",
+ }
+ )
+ assert Path(output).name == "a.mp4" and not client_utils.probe_url(output)
+
class TestTimeseries:
def test_component_functions(self):
| Video fails when value is a URL.
### Describe the bug
I am using a Video component with the value set to a public video URL, but the pre-process function is throwing the error below:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/gradio/routes.py", line 439, in run_predict
output = await app.get_blocks().process_api(
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1382, in process_api
inputs = self.preprocess_data(fn_index, inputs, state)
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1225, in preprocess_data
processed_input.append(block.preprocess(inputs[i]))
File "/usr/local/lib/python3.10/dist-packages/gradio/components/video.py", line 207, in preprocess
file_name = Path(self.make_temp_copy_if_needed(file_name))
File "/usr/local/lib/python3.10/dist-packages/gradio/components/base.py", line 220, in make_temp_copy_if_needed
temp_dir = self.hash_file(file_path)
File "/usr/local/lib/python3.10/dist-packages/gradio/components/base.py", line 184, in hash_file
with open(file_path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'public-url'
```
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```
import gradio
url: str = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4"
with gradio.Blocks() as interface:
video_in: gradio.Video = gradio.Video(value=url, autoplay=False)
video_out: gradio.Video = gradio.Video()
run: gradio.Button = gradio.Button()
run.click(lambda x: x, video_in, video_out)
interface.launch(debug=True, share=True)
```
### Screenshot
_No response_
### Logs
```shell
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/gradio/routes.py", line 439, in run_predict
output = await app.get_blocks().process_api(
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1382, in process_api
inputs = self.preprocess_data(fn_index, inputs, state)
File "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", line 1225, in preprocess_data
processed_input.append(block.preprocess(inputs[i]))
File "/usr/local/lib/python3.10/dist-packages/gradio/components/video.py", line 207, in preprocess
file_name = Path(self.make_temp_copy_if_needed(file_name))
File "/usr/local/lib/python3.10/dist-packages/gradio/components/base.py", line 220, in make_temp_copy_if_needed
temp_dir = self.hash_file(file_path)
File "/usr/local/lib/python3.10/dist-packages/gradio/components/base.py", line 184, in hash_file
with open(file_path, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'public-url'
```
### System Info
```shell
1. Gradio version: 3.36.1
2. Ubuntu: 22.04
```
### Severity
I can work around it
| Thanks for filing @Sreerag-ibtl . We should pass through the URL in this case. | 2023-07-12T18:10:54 |
gradio-app/gradio | 4,976 | gradio-app__gradio-4976 | [
"4975"
] | a0efc1180d8613dabdd7eba025a99c396073c2f4 | diff --git a/gradio/chat_interface.py b/gradio/chat_interface.py
--- a/gradio/chat_interface.py
+++ b/gradio/chat_interface.py
@@ -177,6 +177,7 @@ def __init__(
)
self.saved_input = State()
+ self.chatbot_state = State([])
self._setup_events()
self._setup_api()
@@ -195,14 +196,14 @@ def _setup_events(self):
queue=False,
).then(
self._display_input,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
queue=False,
).then(
submit_fn,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
)
@@ -215,41 +216,41 @@ def _setup_events(self):
queue=False,
).then(
self._display_input,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
queue=False,
).then(
submit_fn,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
)
if self.retry_btn:
self.retry_btn.click(
self._delete_prev_fn,
- [self.chatbot],
- [self.chatbot, self.saved_input],
+ [self.chatbot_state],
+ [self.chatbot, self.saved_input, self.chatbot_state],
api_name=False,
queue=False,
).then(
self._display_input,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
queue=False,
).then(
submit_fn,
- [self.saved_input, self.chatbot],
- [self.chatbot],
+ [self.saved_input, self.chatbot_state],
+ [self.chatbot, self.chatbot_state],
api_name=False,
)
if self.undo_btn:
self.undo_btn.click(
self._delete_prev_fn,
- [self.chatbot],
+ [self.chatbot_state],
[self.chatbot, self.saved_input],
api_name=False,
queue=False,
@@ -263,9 +264,9 @@ def _setup_events(self):
if self.clear_btn:
self.clear_btn.click(
- lambda: ([], None),
+ lambda: ([], [], None),
None,
- [self.chatbot, self.saved_input],
+ [self.chatbot, self.chatbot_state, self.saved_input],
queue=False,
api_name=False,
)
@@ -276,14 +277,10 @@ def _setup_api(self):
else:
api_fn = self._api_submit_fn
- # Use a gr.State() instead of self.chatbot so that the API doesn't require passing forth
- # a chat history, instead it is just stored internally in the state.
- history = State([])
-
self.fake_api_btn.click(
api_fn,
- [self.textbox, history],
- [self.textbox, history],
+ [self.textbox, self.chatbot_state],
+ [self.textbox, self.chatbot_state],
api_name="chat",
)
@@ -292,30 +289,33 @@ def _clear_and_save_textbox(self, message: str) -> tuple[str, str]:
def _display_input(
self, message: str, history: list[list[str | None]]
- ) -> list[list[str | None]]:
+ ) -> tuple[list[list[str | None]], list[list[str | None]]]:
history.append([message, None])
- return history
+ return history, history
def _submit_fn(
self, message: str, history_with_input: list[list[str | None]]
- ) -> list[list[str | None]]:
+ ) -> tuple[list[list[str | None]], list[list[str | None]]]:
history = history_with_input[:-1]
response = self.fn(message, history)
history.append([message, response])
- return history
+ return history, history
def _stream_fn(
self, message: str, history_with_input: list[list[str | None]]
- ) -> Generator[list[list[str | None]], None, None]:
+ ) -> Generator[tuple[list[list[str | None]], list[list[str | None]]], None, None]:
history = history_with_input[:-1]
generator = self.fn(message, history)
try:
first_response = next(generator)
- yield history + [[message, first_response]]
+ update = history + [[message, first_response]]
+ yield update, update
except StopIteration:
- yield history + [[message, None]]
+ update = history + [[message, None]]
+ yield update, update
for response in generator:
- yield history + [[message, response]]
+ update = history + [[message, response]]
+ yield update, update
def _api_submit_fn(
self, message: str, history: list[list[str | None]]
@@ -347,9 +347,9 @@ def _examples_stream_fn(
def _delete_prev_fn(
self, history: list[list[str | None]]
- ) -> tuple[list[list[str | None]], str]:
+ ) -> tuple[list[list[str | None]], str, list[list[str | None]]]:
try:
message, _ = history.pop()
except IndexError:
message = ""
- return history, message or ""
+ return history, message or "", history
diff --git a/gradio/events.py b/gradio/events.py
--- a/gradio/events.py
+++ b/gradio/events.py
@@ -3,7 +3,7 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Literal, Sequence
from gradio_client.documentation import document, set_documentation_group
@@ -91,8 +91,8 @@ def __init__(
def __call__(
self,
fn: Callable | None,
- inputs: Component | list[Component] | set[Component] | None = None,
- outputs: Component | list[Component] | None = None,
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
+ outputs: Component | Sequence[Component] | None = None,
api_name: str | None | Literal[False] = None,
status_tracker: None = None,
scroll_to_output: bool = False,
| diff --git a/test/test_chat_interface.py b/test/test_chat_interface.py
--- a/test/test_chat_interface.py
+++ b/test/test_chat_interface.py
@@ -1,4 +1,4 @@
-import time
+from concurrent.futures import wait
import pytest
@@ -96,8 +96,7 @@ def test_streaming_api(self, connect):
chatbot = gr.ChatInterface(stream).queue()
with connect(chatbot) as client:
job = client.submit("hello")
- while not job.done():
- time.sleep(0.1)
+ wait([job])
assert job.outputs() == ["h", "he", "hel", "hell", "hello"]
def test_non_streaming_api(self, connect):
| Switch `gr.ChatInterface` to use `gr.State` instead of passing the `gr.Chatbot` content back and forth
As [discussed internally](https://huggingface.slack.com/archives/C05G60G3DU3/p1689768571170509), we should switch `gr.ChatInterface` to use `gr.State` instead of passing the `gr.Chatbot` content back and forth to reduce the payload.
| 2023-07-19T23:05:16 |
|
gradio-app/gradio | 5,057 | gradio-app__gradio-5057 | [
"4888"
] | 56d2609de93387a75dc82b1c06c1240c5b28c0b8 | diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py
--- a/client/python/gradio_client/client.py
+++ b/client/python/gradio_client/client.py
@@ -12,7 +12,7 @@
import urllib.parse
import uuid
import warnings
-from concurrent.futures import Future, TimeoutError
+from concurrent.futures import Future
from datetime import datetime
from pathlib import Path
from threading import Lock
@@ -283,6 +283,11 @@ def predict(
client.predict(5, "add", 4, api_name="/predict")
>> 9.0
"""
+ inferred_fn_index = self._infer_fn_index(api_name, fn_index)
+ if self.endpoints[inferred_fn_index].is_continuous:
+ raise ValueError(
+ "Cannot call predict on this function as it may run forever. Use submit instead."
+ )
return self.submit(*args, api_name=api_name, fn_index=fn_index).result()
def submit(
@@ -761,6 +766,7 @@ def __init__(self, client: Client, fn_index: int, dependency: dict):
self.input_component_types = []
self.output_component_types = []
self.root_url = client.src + "/" if not client.src.endswith("/") else client.src
+ self.is_continuous = dependency.get("types", {}).get("continuous", False)
try:
# Only a real API endpoint if backend_fn is True (so not just a frontend function), serializers are valid,
# and api_name is not False (meaning that the developer has explicitly disabled the API endpoint)
@@ -1103,7 +1109,7 @@ def result(self, timeout: float | None = None) -> Any:
Parameters:
timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time.
Returns:
- The result of the call that the future represents.
+ The result of the call that the future represents. For generator functions, it will return the final iteration.
Example:
from gradio_client import Client
calculator = Client(src="gradio/calculator")
@@ -1111,25 +1117,7 @@ def result(self, timeout: float | None = None) -> Any:
job.result(timeout=5)
>> 9
"""
- if self.communicator:
- timeout = timeout or float("inf")
- if self.future._exception: # type: ignore
- raise self.future._exception # type: ignore
- with self.communicator.lock:
- if self.communicator.job.outputs:
- return self.communicator.job.outputs[0]
- start = datetime.now()
- while True:
- if (datetime.now() - start).seconds > timeout:
- raise TimeoutError()
- if self.future._exception: # type: ignore
- raise self.future._exception # type: ignore
- with self.communicator.lock:
- if self.communicator.job.outputs:
- return self.communicator.job.outputs[0]
- time.sleep(0.01)
- else:
- return super().result(timeout=timeout)
+ return super().result(timeout=timeout)
def outputs(self) -> list[tuple | Any]:
"""
| diff --git a/client/python/test/conftest.py b/client/python/test/conftest.py
--- a/client/python/test/conftest.py
+++ b/client/python/test/conftest.py
@@ -178,6 +178,32 @@ def show(n):
return demo.queue()
[email protected]
+def count_generator_demo_exception():
+ def count(n):
+ for i in range(int(n)):
+ time.sleep(0.1)
+ if i == 5:
+ raise ValueError("Oh no!")
+ yield i
+
+ def show(n):
+ return str(list(range(int(n))))
+
+ with gr.Blocks() as demo:
+ with gr.Column():
+ num = gr.Number(value=10)
+ with gr.Row():
+ count_btn = gr.Button("Count")
+ count_forever = gr.Button("Count forever")
+ with gr.Column():
+ out = gr.Textbox()
+
+ count_btn.click(count, num, out, api_name="count")
+ count_forever.click(show, num, out, api_name="count_forever", every=3)
+ return demo.queue()
+
+
@pytest.fixture
def file_io_demo():
demo = gr.Interface(
diff --git a/client/python/test/test_client.py b/client/python/test/test_client.py
--- a/client/python/test/test_client.py
+++ b/client/python/test/test_client.py
@@ -133,6 +133,17 @@ def test_intermediate_outputs(self, count_generator_demo):
outputs.append(o)
assert outputs == [str(i) for i in range(3)]
+ @pytest.mark.flaky
+ def test_intermediate_outputs_with_exception(self, count_generator_demo_exception):
+ with connect(count_generator_demo_exception) as client:
+ with pytest.raises(Exception):
+ client.predict(7, api_name="/count")
+
+ with pytest.raises(
+ ValueError, match="Cannot call predict on this function"
+ ):
+ client.predict(5, api_name="/count_forever")
+
def test_break_in_loop_if_error(self, calculator_demo):
with connect(calculator_demo) as client:
job = client.submit("foo", "add", 4, fn_index=0)
@@ -229,8 +240,9 @@ def test_cancel_from_client_queued(self, cancel_from_client_demo):
job.cancel()
break
time.sleep(0.5)
- # Result for iterative jobs is always the first result
- assert job.result() == 0
+ # Result for iterative jobs will raise there is an exception
+ with pytest.raises(CancelledError):
+ job.result()
# The whole prediction takes 10 seconds to run
# and does not iterate. So this tests that we can cancel
# halfway through a prediction
diff --git a/scripts/install_test_requirements.sh b/scripts/install_test_requirements.sh
--- a/scripts/install_test_requirements.sh
+++ b/scripts/install_test_requirements.sh
@@ -7,5 +7,5 @@ pip_required
echo "Installing requirements before running tests..."
pip install --upgrade pip
-pip install -r requirements.txt
pip install -r test/requirements.txt
+pip install -e client/python
| Install `gradio_client` locally in CI
I think we should install `gradio_client` in editable mode in CI, so the local version of `gradio_client` is used by `gradio` for CI checks.
Currently `gradio` relies on the version of `gradio_client` available on pypi, which is correct. However, if we make changes to `gradio_client` that impact `gradio`, CI checks can fail, since those changes won't be used by the `gradio` version running in CI.
Currently this is annoying but understandable, because the release processes for `gradio` and `gradio_client` are independent: we can release changes to one without the other, even if said changes exist on main. When we move to a unified release process for the whole repository (#4851), any changes in gradio + gradio client will be released together (along with any other changes on main). This will mean that they can't get out of sync, and an editable install of all python packages is safe (and desired, because tests should reflect as closely as possible what will happen when the libraries are published).
The way the release process will work is that we look for dependency changes: if `gradio_client` is bumped and `gradio` depends on it, then the version of `gradio_client` in the requirements file will be bumped (if the current version doesn't satisfy the semver range of the dependency in the requirements file). This is also how JS dependencies will work in the future (if we have libraries that depend on libraries, which we will shortly).
Would love to get people's thoughts on this, and any context for the original decision to _not_ install the client in editable mode.
cc @freddyaboulton @abidlabs
| Can you explain how this works in practice:
> if gradio_client is bumped and gradio depends on it, then the version of gradio_client in the requirements file will be bumped (if the current version doesn't satisfy the semver range of the dependency in the requirements file)
If we do `gradio_client>=0.2.7` in gradio's requirements.txt then the version of `gradio_client` will always match the semver range of the `gradio` requirements file.
The reason for doing this was exactly what you said here:
> Currently this is annoying but understandable because the release processes for gradio and gradio_client are independent, we can release changes to one without the other even if said changes exist on main
The risk is that we release a version of `gradio` without releasing a corresponding version of `gradio_client`. So `gradio` passes all of the checks on CI but is actually broken if someone were to download it from pypi.
As far as I understand, the proposed release process does not fix this. It's still possible for us to make changes to `gradio_client` that are merged into `main` (but NOT released on pypi).
We could then make a release of `gradio` that depends on those changes, and so it would be broken if downloaded from pypi.
Unless the proposed changes also mean that every release of `gradio` includes a release of `gradio_client`?
If we could do the following workflow, then I think it would be okay. Say we want to do a release of `gradio` and there have been some changes to `gradio_client` on `main` that have not been deployed to pypi; then the following steps MUST happen:
1. `gradio_client` should be released first
2. In the `requirements.txt` of `gradio`, the version of `gradio_client` dependency should be incremented
3. Then, `gradio` should be released
Okay, @abidlabs and I caught up and discussed some of the concerns/confusions. I'll summarise here:
Firstly, with the incoming unification of releases, it will not be possible to release an individual library on its own if there are changes on main that include a changeset. The whole repo (every public package in it) gets released together.
> Unless the proposed changes also mean that every release of gradio includes a release of gradio_client?
So the answer to this is yes. Assuming there were changes to both `gradio_client` and `gradio`, they will be released together. If there are no changes to `gradio_client` but there are changes to `gradio`, then only gradio will be bumped and released.
I'll go through some scenarios to explain the flow more clearly and why this is 'safe'.
**We update `gradio` and not `gradio_client`**
The current `gradio` has a dependency on `gradio_client>=0.2.9`
- We make PRs as normal and instead of adding a changelog entry we create a changeset file detailing the changes + version.
- When a PR containing a changeset file is merged to main, one of two things will happen:
- if there is no 'release new version' PR, one is created and the version of gradio is bumped according to the version in the changeset and a changelog is generated.
- if there is already a 'release new version' PR, then the PR is updated, adding the contents of the additional changeset(s) to the changelog and bumping the version further if needed. If one change is `patch` and seven others are `minor`, gradio will be bumped by a single `minor` version, let's say from `3.50` -> `3.51`.
- The `gradio` `requirements.txt` will be left as it is (reflecting the current version of `gradio_client`) as there are no changes to `gradio_client`.
- When we merge the 'release new version' PR into `main` the new version of `gradio` is released on pypi.
**We update `gradio` _and_ `gradio_client`**
The current `gradio` has a dependency on `gradio_client>=0.2.9`
- We make PRs as normal and instead of adding a changelog entry we create a changeset file detailing the changes + version.
- When PRs containing changeset files are merged to main, the principles are the same as above.
- In this scenario we make changes to both `gradio` and `gradio_client` with the appropriate changeset files.
- Since the version of `gradio_client` has been incremented and all packages are released in lockstep, we need to increment the version of `gradio_client` that `gradio` depends upon in `gradio`'s `requirements.txt`. This is true for any local package that depends on another local package. If they depend on one another then we always increment the dependent to match the new version of the dependency.
- When we merge the 'release new version' PR into `main`, new versions of both `gradio_client` and `gradio` are released in topological order (dependency -> dependent); if anything fails we abort, ensuring we don't release `gradio` without the appropriate `gradio_client` version. Our graph is small and simple here so this isn't very difficult to figure out.
So if the current version of `gradio` depends on `gradio_client>=0.2.9` and we bump `gradio_client` to `0.3.0`, then we automatically update `gradio` to depend on `gradio_client>=0.3.0` (by updating the `requirements.txt`). Even though the previous semver range matched, we always bump the version because that is what we test against in CI: the current local version (which is 0.3.0 in this case). There may not be breaking changes and it may be fine, but we don't have any confidence of that, so we always keep them in sync.
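As a small aside (this is just an illustration of how a `>=` range behaves, using the `packaging` library that pip builds on; it is not part of the release tooling described above), the old range would have accepted the bumped client version anyway, which is why keeping the pin in sync is a policy choice about testing exactly what we publish rather than something the range itself forces:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_range = SpecifierSet(">=0.2.9")   # the pin gradio currently ships with

print(Version("0.2.9") in old_range)  # True
print(Version("0.3.0") in old_range)  # True: the newly bumped client already satisfies the old range
print(Version("0.2.8") in old_range)  # False
```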
**We update `gradio_client` but not `gradio`**
The current `gradio` has a dependency on `gradio_client>=0.2.9`
- We make PRs as normal and instead of adding a changelog entry we create a changeset file detailing the changes + version.
- When PRs containing changeset files are merged to main, the principles are the same as above.
- In this scenario we make changes only to `gradio_client`, with the appropriate changeset files.
- this is identical to the above. We see that the `gradio_client` version has been bumped and we automatically bump the version in `gradio`. We do this to keep things in sync. We have pretty much zero knowledge about mismatched versions and want to avoid them just to be safe, even though this probably won't cause any issue. The versions are really just an implementation detail; it is easiest to think about it as 'gradio depends on the local version of `gradio_client`', and we just fix up the requirements file to reflect what that will mean when published. We want whatever gets published to be _exactly_ what we last ran CI on (on release); while a mismatch is unlikely to cause any issues, it is safer to keep them synced. "Local gradio version" converted for publishing just means "whatever the current version for that dep is", so we could even consider a hard pin `==`.
I hope this helps clear things up.
---
Now one concern that @abidlabs raised is this:
> 1. gradio_client should be released first
> 2. In the requirements.txt of gradio, the version of gradio_client dependency should be incremented
> 3. Then, gradio should be released
The place where the version bumps are potentially problematic is mainly the 'release new version' PR. In this PR, when we install gradio (with its updated `requirements.txt` file), it will try to install a version of `gradio_client` that doesn't exist on pypi.
However, it turns out that this isn't an issue: as long as we install `gradio_client` in editable mode _first_ and then `gradio`, the concrete version of `gradio_client` that `gradio` will grab is the version installed from source. We checked this and it works fine.
This is the only place the versions could cause a potential issue, because the versions are not bumped when every changeset PR is merged to main; they are only bumped in the 'release new versions' PR. And when that is merged into main, the versions actually get released. There may be a brief window of a few seconds when someone working locally could have issues, but that is about the worst thing that can happen. If they try again 2 seconds later (or just install `gradio_client` in editable mode first), they won't have an issue.
---
So to summarise, I think the above solution of installing both `gradio` and `gradio_client` in editable mode (even if the version is not available on pypi), the automatic dependency updates, and the unified release process will put us in a place where our CI is a near-perfect reflection of how gradio will actually behave _when it is published_, which I think is pretty much ideal.
---
Apologies for the wall of text, but I didn't want to leave anything to chance. I hope this was helpful; just let me know if you have any other questions or concerns.
Amazing @pngwn thanks for walking through all of the cases. LGTM
Thank you so much for the detailed summary @pngwn !
> we could even consider a hard pin
Yes, I think the way you describe the version bumps of `gradio_client` in `gradio` is equivalent to a hard pin.
Yeah, they are basically. | 2023-08-01T15:22:10 |
gradio-app/gradio | 5,074 | gradio-app__gradio-5074 | [
"5006"
] | d7f83823fbd7604456b0127d689a63eed759807d | diff --git a/demo/clear_components/run.py b/demo/clear_components/run.py
--- a/demo/clear_components/run.py
+++ b/demo/clear_components/run.py
@@ -7,13 +7,12 @@
import numpy as np
import matplotlib.pyplot as plt
-import random
-import os
+
def random_plot():
start_year = 2020
- x = np.arange(start_year, start_year + random.randint(0, 10))
+ x = np.arange(start_year, start_year + 5)
year_count = x.shape[0]
plt_format = "-"
fig = plt.figure()
@@ -115,7 +114,7 @@ def random_model3d():
)
),
gr.Dataframe(
- value=lambda: pd.DataFrame({"random_number_rows": range(random.randint(0, 10))})
+ value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"])
),
gr.Timeseries(value=lambda: os.path.join(file_dir, "time.csv")),
gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])),
| diff --git a/js/app/src/components/Audio/Audio.test.ts b/js/app/src/components/Audio/Audio.test.ts
deleted file mode 100644
--- a/js/app/src/components/Audio/Audio.test.ts
+++ /dev/null
@@ -1,352 +0,0 @@
-import { test, describe, assert, afterEach, vi, beforeAll } from "vitest";
-import { spy, spyOn } from "tinyspy";
-import { cleanup, render, wait_for_event, wait } from "@gradio/tootils";
-import event from "@testing-library/user-event";
-import { setupi18n } from "../../i18n";
-import { tick } from "svelte";
-
-import Audio from "./Audio.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
-
-const loading_status = {
- eta: 0,
- queue_position: 1,
- queue_size: 1,
- status: "complete" as LoadingStatus["status"],
- scroll_to_output: false,
- visible: true,
- fn_index: 0,
- show_progress: "full" as LoadingStatus["show_progress"]
-};
-
-describe("Audio", () => {
- beforeAll(() => {
- window.HTMLMediaElement.prototype.play = vi.fn();
- window.HTMLMediaElement.prototype.pause = vi.fn();
- });
- afterEach(() => cleanup());
-
- test("renders provided value and label", async () => {
- const { getByTestId, queryAllByText } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "dynamic",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "Audio Component",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- name: "bar",
- source: "upload"
- });
-
- assert.isTrue(
- getByTestId("Audio Component-audio").src.endsWith(
- "foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
- )
- );
- assert(queryAllByText("Audio Component").length, 1);
- });
-
- test("hides label", async () => {
- const { queryAllByText } = await render(Audio, {
- show_label: false,
- loading_status,
- mode: "dynamic",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "Audio Component",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- name: "bar",
- source: "upload"
- });
-
- assert(queryAllByText("Audio Component").length, 0);
- });
-
- test("upload sets change event", async () => {
- setupi18n();
- const { container, component } = await render(Audio, {
- show_label: false,
- loading_status,
- value: null,
- mode: "dynamic",
- label: "audio",
- root: "foo",
- root_url: null,
- streaming: false,
- name: "bar",
- source: "upload"
- });
-
- const item = container.querySelectorAll("input")[0];
- const file = new File(["hello"], "my-audio.wav", { type: "audio/wav" });
- event.upload(item, file);
- const mock = await wait_for_event(component, "change");
- assert.equal(mock.callCount, 1);
- assert.equal(
- component.$capture_state().value.data,
- "data:audio/wav;base64,aGVsbG8="
- );
- assert.equal(component.$capture_state().value.name, "my-audio.wav");
- });
-
- test("static audio sets value", async () => {
- const { getByTestId } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "static",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "Audio Component",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- name: "bar",
- source: "upload"
- });
-
- assert.isTrue(
- getByTestId("Audio Component-audio").src.endsWith(
- "foo/file=https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
- )
- );
- });
-
- test("stop recording sets data", async () => {
- let data_event;
- let stop_event;
-
- const media_recorder_mock = vi.fn((s, x) => {
- return {
- start: vi.fn(() => {
- data_event({ data: "hello" });
- data_event({ data: "hello" });
- data_event({ data: "hello" });
- data_event({ data: "hello" });
- }),
- stop: vi.fn(async () => {
- await stop_event();
- }),
- addEventListener: vi.fn((evt, cb) => {
- if (evt === "dataavailable") {
- data_event = cb;
- }
-
- if (evt === "stop") {
- stop_event = cb;
- }
- })
- };
- });
-
- const media_mock = {
- mediaDevices: {
- getUserMedia: vi.fn(() => Promise.resolve(true))
- }
- };
-
- vi.stubGlobal("navigator", media_mock);
- vi.stubGlobal("MediaRecorder", media_recorder_mock);
-
- const { component, getByText } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "dynamic",
- value: null,
- label: "Audio Component",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- name: "bar"
- });
-
- const startButton = getByText("Record from microphone");
- await event.click(startButton);
- const stopButton = getByText("Stop recording");
- await event.click(stopButton);
- const mock = await wait_for_event(component, "stop_recording");
-
- assert.equal(
- component.$capture_state().value.data,
- "data:audio/wav;base64,aGVsbG9oZWxsb2hlbGxvaGVsbG8="
- );
- assert.equal(component.$capture_state().value.name, "audio.wav");
- assert.equal(mock.callCount, 1);
- });
-
- test("when autoplay is true `media.play` should be called in static mode", async () => {
- const { getByTestId } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "static",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "static",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- autoplay: true
- });
-
- const startButton = getByTestId<HTMLAudioElement>("static-audio");
- const fn = spyOn(startButton, "play");
- startButton.dispatchEvent(new Event("loadeddata"));
-
- assert.equal(fn.callCount, 1);
- });
-
- test("when autoplay is true `media.play` should be called in dynamic mode", async () => {
- const { getByTestId } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "dynamic",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "dynamic",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- autoplay: true
- });
-
- const startButton = getByTestId<HTMLAudioElement>("dynamic-audio");
- const fn = spyOn(startButton, "play");
- startButton.dispatchEvent(new Event("loadeddata"));
-
- assert.equal(fn.callCount, 1);
- });
-
- test("when autoplay is true `media.play` should be called in static mode when the audio data is updated", async () => {
- const { component, getByTestId } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "static",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "static",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- autoplay: true
- });
-
- const startButton = getByTestId<HTMLAudioElement>("static-audio");
- const fn = spyOn(startButton, "play");
-
- startButton.dispatchEvent(new Event("loadeddata"));
-
- component.$set({
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- }
- });
-
- startButton.dispatchEvent(new Event("loadeddata"));
-
- assert.equal(fn.callCount, 2);
- });
-
- test("when autoplay is true `media.play` should be called in dynamic mode when the audio data is updated", async () => {
- const { component, getByTestId } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "dynamic",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "dynamic",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- autoplay: true
- });
-
- const startButton = getByTestId<HTMLAudioElement>("dynamic-audio");
- const fn = spyOn(startButton, "play");
-
- startButton.dispatchEvent(new Event("loadeddata"));
-
- component.$set({
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- }
- });
-
- startButton.dispatchEvent(new Event("loadeddata"));
-
- assert.equal(fn.callCount, 2);
- });
-
- test("audio change event trigger fires when value is changed and only fires once", async () => {
- const { component } = await render(Audio, {
- show_label: true,
- loading_status,
- mode: "static",
- value: {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav",
- data: null,
- is_file: true
- },
- label: "static",
- root: "foo",
- root_url: null,
- streaming: false,
- pending: false,
- source: "microphone",
- autoplay: true
- });
-
- const mock = spy();
- component.$on("change", mock);
-
- (component.value = [
- {
- name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample2.wav",
- data: null,
- is_file: true
- }
- ]),
- assert.equal(mock.callCount, 1);
- });
-});
diff --git a/js/app/src/components/Button/Button.test.ts b/js/app/src/components/Button/Button.test.ts
deleted file mode 100644
--- a/js/app/src/components/Button/Button.test.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import { test, describe, assert, afterEach } from "vitest";
-import { spy } from "tinyspy";
-import { cleanup, fireEvent, render } from "@gradio/tootils";
-
-import Button from "./Button.svelte";
-
-describe("Hello.svelte", () => {
- afterEach(() => cleanup());
-
- test.skip("renders label text", async () => {
- const { container, component } = await render(Button, {
- value: "Click Me"
- });
- assert.equal(container.innerText, "Click Me");
- });
-
- test.skip("triggers callback when clicked", async () => {
- const { container, component } = await render(Button, {
- value: "Click Me"
- });
- const mock = spy();
- component.$on("click", mock);
-
- fireEvent.click(container.querySelector("button")!);
-
- assert.isTrue(mock.called);
- });
-});
diff --git a/js/app/src/components/ColorPicker/ColorPicker.test.ts b/js/app/src/components/ColorPicker/ColorPicker.test.ts
deleted file mode 100644
--- a/js/app/src/components/ColorPicker/ColorPicker.test.ts
+++ /dev/null
@@ -1,52 +0,0 @@
-import { test, describe, assert, afterEach } from "vitest";
-import { cleanup, render } from "@gradio/tootils";
-
-import ColorPicker from "./ColorPicker.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
-
-const loading_status = {
- eta: 0,
- queue_position: 1,
- queue_size: 1,
- status: "complete" as LoadingStatus["status"],
- scroll_to_output: false,
- visible: true,
- fn_index: 0
-};
-
-describe("ColorPicker", () => {
- afterEach(() => cleanup());
-
- test("renders provided value", async () => {
- const { getByDisplayValue } = await render(ColorPicker, {
- loading_status,
- show_label: true,
- mode: "dynamic",
- value: "#000000",
- label: "ColorPicker"
- });
-
- const item: HTMLInputElement = getByDisplayValue("#000000");
- assert.equal(item.value, "#000000");
- });
-
- test("changing the color should update the value", async () => {
- const { component, getByDisplayValue } = await render(ColorPicker, {
- loading_status,
- show_label: true,
- mode: "dynamic",
- value: "#000000",
- label: "ColorPicker"
- });
-
- const item: HTMLInputElement = getByDisplayValue("#000000");
-
- assert.equal(item.value, "#000000");
-
- await component.$set({
- value: "#FFFFFF"
- });
-
- assert.equal(component.value, "#FFFFFF");
- });
-});
diff --git a/js/app/src/components/File/File.test.ts b/js/app/src/components/File/File.test.ts
deleted file mode 100644
--- a/js/app/src/components/File/File.test.ts
+++ /dev/null
@@ -1,76 +0,0 @@
-import { test, describe, expect, afterEach, vi, assert } from "vitest";
-import { cleanup, render } from "@gradio/tootils";
-import { spy } from "tinyspy";
-
-import File from "./File.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
-import { upload_files } from "@gradio/client";
-
-const loading_status = {
- eta: 0,
- queue_position: 1,
- queue_size: 1,
- status: "complete" as LoadingStatus["status"],
- scroll_to_output: false,
- visible: true,
- fn_index: 0
-};
-
-describe("File", () => {
- afterEach(() => {
- cleanup();
- vi.restoreAllMocks();
- });
-
- test("gr.File uploads with blob", async () => {
- vi.mock("@gradio/client", async () => {
- return {
- upload_files: vi.fn((f) => new Promise((res) => res({})))
- };
- });
-
- const api = await import("@gradio/client");
-
- await render(File, {
- loading_status,
- label: "file",
- // @ts-ignore
- value: { name: "freddy.json", data: "{'name': 'freddy'}", blob: vi.fn() },
- show_label: true,
- mode: "dynamic",
- root: "http://localhost:7860",
- file_count: "1",
- root_url: null
- });
-
- expect(api.upload_files).toHaveBeenCalled();
- });
-
- test("gr.File does not upload without blob", async () => {
- const mockUpload = vi.fn(upload_files);
-
- const { component } = await render(File, {
- loading_status,
- label: "file",
- value: { name: "freddy.json", data: "{'name': 'freddy'}" },
- show_label: true,
- mode: "dynamic",
- root: "http://localhost:7860",
- file_count: "1",
- root_url: null
- });
-
- expect(mockUpload).not.toHaveBeenCalled();
-
- const mock = spy();
- component.$on("change", mock);
-
- component.value = {
- name: "freddy_2.json",
- data: "{'name': 'freddy'}",
- is_file: true
- };
-
- assert.equal(mock.callCount, 1);
- });
-});
diff --git a/js/app/src/components/Gallery/Gallery.test.ts b/js/app/src/components/Gallery/Gallery.test.ts
deleted file mode 100644
--- a/js/app/src/components/Gallery/Gallery.test.ts
+++ /dev/null
@@ -1,104 +0,0 @@
-import { test, describe, assert, afterEach, vi } from "vitest";
-import { cleanup, render } from "@gradio/tootils";
-
-import Gallery from "./Gallery.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
-
-const loading_status: LoadingStatus = {
- eta: 0,
- queue_position: 1,
- queue_size: 1,
- status: "complete",
- scroll_to_output: false,
- visible: true,
- fn_index: 0,
- show_progress: "full"
-};
-
-describe("Gallery", () => {
- afterEach(() => {
- cleanup();
- vi.restoreAllMocks();
- });
-
- test("preview shows detailed image by default", async () => {
- window.Element.prototype.scrollTo = vi.fn(() => {});
-
- const { getAllByTestId, getByTestId } = await render(Gallery, {
- loading_status,
- label: "gallery",
- // @ts-ignore
- value: [
- [
- {
- name: "https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
- data: null,
- is_file: true
- },
- "label 0"
- ],
- [
- {
- name: "https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80",
- data: null,
- is_file: true
- },
- "label 1"
- ]
- ],
- show_label: true,
- mode: "dynamic",
- root: "http://localhost:7860",
- root_url: null,
- preview: true
- });
-
- const details = getAllByTestId("detailed-image");
-
- assert.equal(details.length, 1);
- });
-
- test("detailed view does not show larger image", async () => {
- const { queryAllByTestId, getByTestId } = await render(Gallery, {
- loading_status,
- label: "gallery",
- // @ts-ignore
- value: [
- [
- {
- name: "https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
- data: null,
- is_file: true
- },
- "label 0"
- ],
- [
- {
- name: "https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80",
- data: null,
- is_file: true
- },
- "label 1"
- ],
- [
- {
- name: "https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80",
- data: null,
- is_file: true
- },
- "label 2"
- ]
- ],
- show_label: true,
- mode: "dynamic",
- root: "http://localhost:7860",
- root_url: null,
- preview: true,
- allow_preview: false
- });
-
- const details = queryAllByTestId("detailed-image");
-
- assert.equal(details.length, 0);
- });
-});
diff --git a/js/app/test/chatinterface_streaming_echo.spec.ts b/js/app/test/chatinterface_streaming_echo.spec.ts
--- a/js/app/test/chatinterface_streaming_echo.spec.ts
+++ b/js/app/test/chatinterface_streaming_echo.spec.ts
@@ -9,47 +9,52 @@ test("chatinterface works with streaming functions and all buttons behave as exp
const clear_button = await page.locator("button").nth(4);
const textbox = await page.getByTestId("textbox").nth(0);
- let last_iteration;
+ let iterations: Promise<any>[] = [];
page.on("websocket", (ws) => {
- last_iteration = ws.waitForEvent("framereceived", {
- predicate: (event) => {
- return JSON.parse(event.payload as string).msg === "process_completed";
- }
- });
+ iterations.push(
+ ws.waitForEvent("framereceived", {
+ predicate: (event) => {
+ return (
+ JSON.parse(event.payload as string).msg === "process_completed"
+ );
+ }
+ })
+ );
});
await textbox.fill("hello");
await submit_button.click();
- await last_iteration;
+ await iterations[0];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator('.bot.message p').count()).toBe(1);
+ await expect.poll(async () => page.locator(".bot.message p").count()).toBe(1);
const bot_message_0 = await page.locator(".bot.message p").nth(0);
- await expect(bot_message_0).toContainText("You typed: hello");
+ await expect(bot_message_0).toContainText("You typed: hello");
await textbox.fill("hi");
await submit_button.click();
- await last_iteration;
+ await iterations[1];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator('.bot.message p').count()).toBe(2);
+ await expect.poll(async () => page.locator(".bot.message p").count()).toBe(2);
const bot_message_1 = await page.locator(".bot.message p").nth(1);
- await expect(bot_message_1).toContainText("You typed: hi");
+ await expect(bot_message_1).toContainText("You typed: hi");
await retry_button.click();
- await last_iteration;
+ await iterations[2];
await expect(textbox).toHaveValue("");
- await expect(bot_message_1).toContainText("You typed: hi");
+ await expect(bot_message_1).toContainText("You typed: hi");
await undo_button.click();
- await expect.poll(async () => page.locator('.bot.message p').count()).toBe(1);
+ await iterations[3];
+ await expect.poll(async () => page.locator(".bot.message p").count()).toBe(1);
await expect(textbox).toHaveValue("hi");
await textbox.fill("salaam");
await submit_button.click();
- await last_iteration;
+ await iterations[4];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator('.bot.message p').count()).toBe(2);
- await expect(bot_message_1).toContainText("You typed: salaam");
+ await expect.poll(async () => page.locator(".bot.message p").count()).toBe(2);
+ await expect(bot_message_1).toContainText("You typed: salaam");
await clear_button.click();
- await expect.poll(async () => page.locator('.bot.message p').count()).toBe(0);
+ await expect.poll(async () => page.locator(".bot.message p").count()).toBe(0);
});
diff --git a/js/app/test/components.test.ts b/js/app/test/components.test.ts
--- a/js/app/test/components.test.ts
+++ b/js/app/test/components.test.ts
@@ -10,30 +10,30 @@ import {
import { render, cleanup } from "@gradio/tootils";
import { setupi18n } from "../src/i18n";
-import AnnotatedImage from "../src/components/AnnotatedImage/AnnotatedImage.svelte";
-import Audio from "../src/components/Audio/Audio.svelte";
+import AnnotatedImage from "@gradio/annotatedimage";
+import Audio from "@gradio/audio";
import Chatbot from "@gradio/chatbot";
-import Checkbox from "../src/components/Checkbox/Checkbox.svelte";
-import CheckboxGroup from "../src/components/CheckboxGroup/CheckboxGroup.svelte";
-import ColorPicker from "../src/components/ColorPicker/ColorPicker.svelte";
-import DataFrame from "../src/components/DataFrame/DataFrame.svelte";
-import Dropdown from "../src/components/Dropdown/Dropdown.svelte";
-import File from "../src/components/File/File.svelte";
-import Gallery from "../src/components/Gallery/Gallery.svelte";
-import HTML from "../src/components/HTML/HTML.svelte";
-import HighlightedText from "../src/components/HighlightedText/HighlightedText.svelte";
-import Json from "../src/components/Json/Json.svelte";
-import Label from "../src/components/Label/Label.svelte";
-import Markdown from "../src/components/Markdown/Markdown.svelte";
-import Model3D from "../src/components/Model3D/Model3D.svelte";
-import Number from "../src/components/Number/Number.svelte";
-import Radio from "../src/components/Radio/Radio.svelte";
-import Slider from "../src/components/Slider/Slider.svelte";
-import Textbox from "../src/components/Textbox/Textbox.svelte";
-import TimeSeries from "../src/components/TimeSeries/TimeSeries.svelte";
-import UploadButton from "../src/components/UploadButton/UploadButton.svelte";
-import Video from "../src/components/Video/Video.svelte";
-import { LoadingStatus } from "../src/components/StatusTracker/types";
+import Checkbox from "@gradio/checkbox";
+import CheckboxGroup from "@gradio/checkboxgroup";
+import ColorPicker from "@gradio/colorpicker";
+import DataFrame from "@gradio/dataframe";
+import Dropdown from "@gradio/dropdown";
+import File from "@gradio/file";
+import Gallery from "@gradio/gallery";
+import HTML from "@gradio/html";
+import HighlightedText from "@gradio/highlightedtext";
+import Json from "@gradio/json";
+import Label from "@gradio/label";
+import Markdown from "@gradio/markdown";
+import Model3D from "@gradio/model3d";
+import Number from "@gradio/number";
+import Radio from "@gradio/radio";
+import Slider from "@gradio/slider";
+import Textbox from "@gradio/textbox";
+import TimeSeries from "@gradio/timeseries";
+import UploadButton from "@gradio/uploadbutton";
+import Video from "@gradio/video";
+import { LoadingStatus } from "@gradio/statustracker/types";
const loading_status: LoadingStatus = {
eta: 0,
diff --git a/js/app/test/tests.md b/js/app/test/tests.md
--- a/js/app/test/tests.md
+++ b/js/app/test/tests.md
@@ -5,134 +5,178 @@ Just a little reference docs to understand what is tested/ needs testing. Perhap
## Interface
## Flagging
+
## Blocks
+
## Block Layouts
+
## Themes
## Components
### Props/kwargs
-| Component | `value` | `visible` | `elem_id` | `elem_classes` | `container` | `label` | `show_label` |
-| --------------- | --------------- | ---------------- | ---------------- | ------------------- | ----------------- | --------- | ---------------|
-| AnnotatedImage | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Audio | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| BarPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Button | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Chatbot | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Checkbox | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| CheckboxGroup | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| ClearButton | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Code | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| ColorPicker | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Dataframe | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Dropdown | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| File | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Gallery | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| HTML | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| HighlightedText | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Image | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| JSON | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Label | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Lineplot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Markdown | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Model3D | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Number | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Plot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Radio | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| ScatterPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Slider | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Textbox | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Timeseries | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| UploadButton | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
-| Video | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Component | `value` | `visible` | `elem_id` | `elem_classes` | `container` | `label` | `show_label` |
+| --------------- | ------- | --------- | --------- | -------------- | ----------- | ------- | ------------ | --- |
+| AnnotatedImage | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Audio | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| BarPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Button | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Chatbot | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Checkbox | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| CheckboxGroup | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| ClearButton | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Code | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| ColorPicker | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Dataframe | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Dropdown | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| File | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Gallery | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| HTML | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| HighlightedText | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Image | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | |
+| JSON | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Label | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Lineplot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Markdown | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Model3D | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Number | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Plot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Radio | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| ScatterPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Slider | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Textbox | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Timeseries | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| UploadButton | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
+| Video | `❌` | `✅` | `✅` | `✅` | `❌` | `✅` | `❌` |
### Events
-| Component | `value` | `visible` | `elem_id` | `elem_classes` | `container` | `label` | `show_label` |
-| --------------- | --------------- | ---------------- | ---------------- | ------------------- | ----------------- | --------- | ---------------|
-| AnnotatedImage | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Audio | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| BarPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Button | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Chatbot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Checkbox | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| CheckboxGroup | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| ClearButton | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Code | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| ColorPicker | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Dataframe | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Dataset | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Dropdown | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| File | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Gallery | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| HTML | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| HighlightedText | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Image | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Interpretation | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| JSON | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Label | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Lineplot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Markdown | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Model3D | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Number | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Plot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Radio | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| ScatterPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Slider | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| State | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Textbox | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Timeseries | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| UploadButton | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
-| Video | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Component | `value` | `visible` | `elem_id` | `elem_classes` | `container` | `label` | `show_label` |
+| --------------- | ------- | --------- | --------- | -------------- | ----------- | ------- | ------------ | --- |
+| AnnotatedImage | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Audio | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| BarPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Button | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Chatbot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Checkbox | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| CheckboxGroup | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| ClearButton | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Code | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| ColorPicker | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Dataframe | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Dataset | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Dropdown | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| File | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Gallery | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| HTML | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| HighlightedText | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Image | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | |
+| Interpretation | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| JSON | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Label | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Lineplot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Markdown | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Model3D | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Number | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Plot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Radio | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| ScatterPlot | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Slider | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| State | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Textbox | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Timeseries | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| UploadButton | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
+| Video | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` | `❌` |
### `AnnotatedImage`
+
### `Audio`
+
### `BarPlot`
+
### `Button`
+
### `Chatbot`
+
### `Checkbox`
+
### `CheckboxGroup`
+
### `ClearButton`
+
### `Code`
+
### `ColorPicker`
+
### `Dataframe`
+
### `Dataset`
+
### `Dropdown`
+
### `File`
+
### `Gallery`
+
### `HTML`
+
### `HighlightedText`
+
### `Image`
+
### `Interpretation`
+
### `JSON`
+
### `Label`
+
### `Lineplot`
+
### `Markdown`
+
### `Model3D`
+
### `Number`
+
### `Plot`
+
### `Radio`
+
### `ScatterPlot`
+
### `Slider`
+
### `State`
+
### `Textbox`
+
### `Timeseries`
+
### `UploadButton`
+
### `Video`
## Helpers
### Error
+
### load
+
### Examples
+
### Progress
+
### update
+
### make_waveform
+
### EventData
-## Routes
+## Routes
+
### `Request`
+
### `mount_gradio_app`
## Clients
diff --git a/js/chatbot/src/Chatbot.test.ts b/js/chatbot/Chatbot.test.ts
similarity index 97%
rename from js/chatbot/src/Chatbot.test.ts
rename to js/chatbot/Chatbot.test.ts
--- a/js/chatbot/src/Chatbot.test.ts
+++ b/js/chatbot/Chatbot.test.ts
@@ -1,7 +1,7 @@
import { test, describe, assert, afterEach } from "vitest";
import { cleanup, render } from "@gradio/tootils";
-import Chatbot from "./Index.svelte";
-import type { LoadingStatus } from "../../app/src/components/StatusTracker/types";
+import Chatbot from "./index.svelte";
+import type { LoadingStatus } from "@gradio/statustracker/types";
import type { FileData } from "@gradio/upload";
const loading_status: LoadingStatus = {
diff --git a/js/form/src/Range.test.ts b/js/form/src/Range.test.ts
deleted file mode 100644
--- a/js/form/src/Range.test.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-import { test, describe, afterEach, vi, expect } from "vitest";
-import { cleanup, render, fireEvent } from "@gradio/tootils";
-
-import Range from "./Range.svelte";
-
-describe("Range", () => {
- afterEach(() => cleanup());
-
- test("Release event called on blur and pointerUp", async () => {
- const results = await render(Range, {
- label: "range",
- show_label: true,
- value: 1,
- minimum: 0,
- maximum: 10
- });
-
- const numberInput = results.getAllByTestId("number-input")[0];
- const mock = vi.fn();
- results.component.$on("release", mock);
-
- fireEvent.pointerUp(numberInput);
-
- expect(mock).toHaveBeenCalledOnce();
-
- fireEvent.blur(numberInput);
- expect(mock).toHaveBeenCalledTimes(2);
- });
-});
diff --git a/js/app/src/components/Group/Group.test.ts b/js/group/Group.test.ts
similarity index 93%
rename from js/app/src/components/Group/Group.test.ts
rename to js/group/Group.test.ts
--- a/js/app/src/components/Group/Group.test.ts
+++ b/js/group/Group.test.ts
@@ -1,7 +1,7 @@
import { test, describe, assert, afterEach, vi } from "vitest";
import { cleanup, render } from "@gradio/tootils";
-import Group from "./Group.svelte";
+import Group from "./static";
describe("Group", () => {
afterEach(() => {
diff --git a/js/app/src/components/Image/Image.test.ts b/js/image/Image.test.ts
similarity index 89%
rename from js/app/src/components/Image/Image.test.ts
rename to js/image/Image.test.ts
--- a/js/app/src/components/Image/Image.test.ts
+++ b/js/image/Image.test.ts
@@ -9,10 +9,10 @@ import {
} from "vitest";
import { spy } from "tinyspy";
import { cleanup, render } from "@gradio/tootils";
-import { setupi18n } from "../../i18n";
+import { setupi18n } from "../app/src/i18n";
-import Image from "./Image.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
+import Image from "./index.svelte";
+import type { LoadingStatus } from "@gradio/statustracker/types";
const loading_status = {
eta: 0,
diff --git a/js/app/src/components/Radio/Radio.test.ts b/js/radio/Radio.test.ts
similarity index 94%
rename from js/app/src/components/Radio/Radio.test.ts
rename to js/radio/Radio.test.ts
--- a/js/app/src/components/Radio/Radio.test.ts
+++ b/js/radio/Radio.test.ts
@@ -3,8 +3,8 @@ import { test, describe, assert, afterEach } from "vitest";
import { cleanup, render } from "@gradio/tootils";
import event from "@testing-library/user-event";
-import Radio from "./Radio.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
+import Radio from "./index.svelte";
+import type { LoadingStatus } from "@gradio/statustracker/types";
const loading_status = {
eta: 0,
diff --git a/js/app/src/components/Textbox/Textbox.test.ts b/js/textbox/Textbox.test.ts
similarity index 93%
rename from js/app/src/components/Textbox/Textbox.test.ts
rename to js/textbox/Textbox.test.ts
--- a/js/app/src/components/Textbox/Textbox.test.ts
+++ b/js/textbox/Textbox.test.ts
@@ -3,8 +3,8 @@ import { spy } from "tinyspy";
import { cleanup, fireEvent, render, get_text, wait } from "@gradio/tootils";
import event from "@testing-library/user-event";
-import Textbox from "./Textbox.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
+import Textbox from "./index.svelte";
+import type { LoadingStatus } from "@gradio/statustracker/types";
const loading_status = {
eta: 0,
diff --git a/js/app/src/components/UploadButton/UploadButton.test.ts b/js/uploadbutton/UploadButton.test.ts
similarity index 92%
rename from js/app/src/components/UploadButton/UploadButton.test.ts
rename to js/uploadbutton/UploadButton.test.ts
--- a/js/app/src/components/UploadButton/UploadButton.test.ts
+++ b/js/uploadbutton/UploadButton.test.ts
@@ -2,9 +2,9 @@ import { test, describe, expect, vi, afterEach, assert } from "vitest";
import { spy, spyOn } from "tinyspy";
import { cleanup, render, wait_for_event } from "@gradio/tootils";
import event from "@testing-library/user-event";
-import { setupi18n } from "../../i18n";
-import type { LoadingStatus } from "../StatusTracker/types";
-import UploadButton from "./UploadButton.svelte";
+import { setupi18n } from "../app/src/i18n";
+import type { LoadingStatus } from "@gradio/statustracker/types";
+import UploadButton from "./index.svelte";
describe("UploadButton", () => {
afterEach(() => {
diff --git a/js/app/src/components/Video/Video.test.ts b/js/video/Video.test.ts
similarity index 97%
rename from js/app/src/components/Video/Video.test.ts
rename to js/video/Video.test.ts
--- a/js/app/src/components/Video/Video.test.ts
+++ b/js/video/Video.test.ts
@@ -10,10 +10,10 @@ import {
} from "vitest";
import { spy, spyOn } from "tinyspy";
import { cleanup, render } from "@gradio/tootils";
-import { setupi18n } from "../../i18n";
+import { setupi18n } from "../app/src/i18n";
-import Video from "./Video.svelte";
-import type { LoadingStatus } from "../StatusTracker/types";
+import Video from "./index.svelte";
+import type { LoadingStatus } from "@gradio/statustracker/types";
const loading_status = {
eta: 0,
diff --git a/test/README.md b/test/README.md
--- a/test/README.md
+++ b/test/README.md
@@ -2,7 +2,7 @@
- All the tests should test Backend functionalities. Frontend functionalities and e2e tests are done in Frontend.
- Make use of pytest fixtures whenever it is possible. With fixtures, objects with high initialize durations are reused within tests, ex. a client session.
-- All test_data resides within _gradio/test_data_ and all test_files resides within test/test_files.
+- All test*data resides within \_gradio/test_data* and all test_files resides within test/test_files.
- When doing network operations do not forget to make use of async to make tests faster.
- Have clear class and function naming within the tests.
- Short descriptions within test functions are great.
diff --git a/test/test_files/README.md b/test/test_files/README.md
--- a/test/test_files/README.md
+++ b/test/test_files/README.md
@@ -1,5 +1,6 @@
Files in this directory are used in:
-* tests for the gradio library
-* example inputs in the view API documentation
-Warning: please be careful when renaming / deleting files
\ No newline at end of file
+- tests for the gradio library
+- example inputs in the view API documentation
+
+Warning: please be careful when renaming / deleting files
| move component files into a cleaner folder structure
first part of #3360.
| 2023-08-02T18:03:53 |
|
gradio-app/gradio | 5,104 | gradio-app__gradio-5104 | [
"5100"
] | 37caa2e0fe95d6cab8beb174580fb557904f137f | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -221,7 +221,7 @@ def app_id(request: fastapi.Request) -> dict:
@app.post("/login")
@app.post("/login/")
def login(form_data: OAuth2PasswordRequestForm = Depends()):
- username, password = form_data.username, form_data.password
+ username, password = form_data.username.strip(), form_data.password
if app.auth is None:
return RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
if (
| diff --git a/test/test_routes.py b/test/test_routes.py
--- a/test/test_routes.py
+++ b/test/test_routes.py
@@ -465,6 +465,12 @@ def test_post_login(self):
)
assert response.status_code == 400
+ response = client.post(
+ "/login",
+ data={"username": " test ", "password": "correct_password"},
+ )
+ assert response.status_code == 200
+
class TestQueueRoutes:
@pytest.mark.asyncio
| Sweep: Strip the user's username before login in routes.py. Add a unit test for this within test_post_login
It seems that spaces are kept at the beginning and at the end of the username Textbox on the login page.
Can you strip the username in the login route in the script gradio/routes.py?
We also need to add a unit test within test_post_login.
| 2023-08-04T20:17:17 |
|
gradio-app/gradio | 5,232 | gradio-app__gradio-5232 | [
"4754"
] | b3e50db92f452f376aa2cc081326d40bb69d6dd7 | diff --git a/gradio/components/checkboxgroup.py b/gradio/components/checkboxgroup.py
--- a/gradio/components/checkboxgroup.py
+++ b/gradio/components/checkboxgroup.py
@@ -35,7 +35,7 @@ class CheckboxGroup(
def __init__(
self,
- choices: list[str | float | int] | None = None,
+ choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
*,
value: list[str | float | int] | str | float | int | Callable | None = None,
type: Literal["value", "index"] = "value",
@@ -54,22 +54,26 @@ def __init__(
):
"""
Parameters:
- choices: list of (string or numeric) options to select from.
- value: default selected list of options. If a single choice is selected, it can be passed in as a string or numeric type. If callable, the function will be called whenever the app loads to set the initial value of the component.
+ choices: A list of string or numeric options to select from. An option can also be a tuple of the form (name, value), where name is the displayed name of the checkbox button and value is the value to be passed to the function, or returned by the function.
+ value: Default selected list of options. If a single choice is selected, it can be passed in as a string or numeric type. If callable, the function will be called whenever the app loads to set the initial value of the component.
type: Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
- label: component name in interface.
- info: additional component description.
+ label: Component name in interface.
+ info: Additional component description.
every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
+ show_label: If True, will display label.
container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- interactive: if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
+ scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
+ min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
+ interactive: If True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
- self.choices = choices or []
+ self.choices = (
+ [c if isinstance(c, tuple) else (str(c), c) for c in choices]
+ if choices
+ else []
+ )
valid_types = ["value", "index"]
if type not in valid_types:
raise ValueError(
@@ -109,8 +113,8 @@ def get_config(self):
def example_inputs(self) -> dict[str, Any]:
return {
- "raw": self.choices[0] if self.choices else None,
- "serialized": self.choices[0] if self.choices else None,
+ "raw": [self.choices[0][1]] if self.choices else None,
+ "serialized": [self.choices[0][1]] if self.choices else None,
}
@staticmethod
@@ -119,7 +123,7 @@ def update(
| str
| Literal[_Keywords.NO_VALUE]
| None = _Keywords.NO_VALUE,
- choices: list[str] | None = None,
+ choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
label: str | None = None,
info: str | None = None,
show_label: bool | None = None,
@@ -148,12 +152,12 @@ def preprocess(self, x: list[str | int | float]) -> list[str | int | float]:
Parameters:
x: list of selected choices
Returns:
- list of selected choices as strings or indices within choice list
+ list of selected choice values as strings or indices within choice list
"""
if self.type == "value":
return x
elif self.type == "index":
- return [self.choices.index(choice) for choice in x]
+ return [[value for _, value in self.choices].index(choice) for choice in x]
else:
raise ValueError(
f"Unknown type: {self.type}. Please choose from: 'value', 'index'."
@@ -163,9 +167,8 @@ def postprocess(
self, y: list[str | int | float] | str | int | float | None
) -> list[str | int | float]:
"""
- Any postprocessing needed to be performed on function output.
Parameters:
- y: List of selected choices. If a single choice is selected, it can be passed in as a string
+ y: List of selected choice values. If a single choice is selected, it can be passed in as a string
Returns:
List of selected choices
"""
@@ -177,7 +180,7 @@ def postprocess(
def get_interpretation_neighbors(self, x):
leave_one_out_sets = []
- for choice in self.choices:
+ for choice in [value for _, value in self.choices]:
leave_one_out_set = list(x)
if choice in leave_one_out_set:
leave_one_out_set.remove(choice)
@@ -192,7 +195,7 @@ def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True.
"""
final_scores = []
- for choice, score in zip(self.choices, scores):
+ for choice, score in zip([value for _, value in self.choices], scores):
score_set = [score, None] if choice in x else [None, score]
final_scores.append(score_set)
return final_scores
@@ -213,3 +216,13 @@ def style(
if container is not None:
self.container = container
return self
+
+ def as_example(self, input_data):
+ if input_data is None:
+ return None
+ elif not isinstance(input_data, list):
+ input_data = [input_data]
+ return [
+ next((c[0] for c in self.choices if c[1] == data), None)
+ for data in input_data
+ ]
diff --git a/gradio/components/radio.py b/gradio/components/radio.py
--- a/gradio/components/radio.py
+++ b/gradio/components/radio.py
@@ -36,7 +36,7 @@ class Radio(
def __init__(
self,
- choices: list[str | int | float] | None = None,
+ choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
*,
value: str | int | float | Callable | None = None,
type: str = "value",
@@ -55,22 +55,26 @@ def __init__(
):
"""
Parameters:
- choices: list of options to select from.
- value: the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
+ choices: A list of string or numeric options to select from. An option can also be a tuple of the form (name, value), where name is the displayed name of the radio button and value is the value to be passed to the function, or returned by the function.
+ value: The option selected by default. If None, no option is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
- label: component name in interface.
- info: additional component description.
+ label: Component name in interface.
+ info: Additional component description.
every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
show_label: if True, will display label.
container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- interactive: if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
+ scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
+ min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
+ interactive: If True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
- self.choices = choices or []
+ self.choices = (
+ [c if isinstance(c, tuple) else (str(c), c) for c in choices]
+ if choices
+ else []
+ )
valid_types = ["value", "index"]
if type not in valid_types:
raise ValueError(
@@ -110,8 +114,8 @@ def get_config(self):
def example_inputs(self) -> dict[str, Any]:
return {
- "raw": self.choices[0] if self.choices else None,
- "serialized": self.choices[0] if self.choices else None,
+ "raw": self.choices[0][1] if self.choices else None,
+ "serialized": self.choices[0][1] if self.choices else None,
}
@staticmethod
@@ -121,7 +125,7 @@ def update(
| float
| Literal[_Keywords.NO_VALUE]
| None = _Keywords.NO_VALUE,
- choices: list[str | int | float] | None = None,
+ choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
label: str | None = None,
info: str | None = None,
show_label: bool | None = None,
@@ -150,7 +154,7 @@ def preprocess(self, x: str | int | float | None) -> str | int | float | None:
Parameters:
x: selected choice
Returns:
- selected choice as string or index within choice list
+ value of the selected choice as string or index within choice list
"""
if self.type == "value":
return x
@@ -158,14 +162,14 @@ def preprocess(self, x: str | int | float | None) -> str | int | float | None:
if x is None:
return None
else:
- return self.choices.index(x)
+ return [value for _, value in self.choices].index(x)
else:
raise ValueError(
f"Unknown type: {self.type}. Please choose from: 'value', 'index'."
)
def get_interpretation_neighbors(self, x):
- choices = list(self.choices)
+ choices = [value for _, value in self.choices]
choices.remove(x)
return choices, {}
@@ -176,7 +180,8 @@ def get_interpretation_scores(
Returns:
Each value represents the interpretation score corresponding to each choice.
"""
- scores.insert(self.choices.index(x), None)
+ choices = [value for _, value in self.choices]
+ scores.insert(choices.index(x), None)
return scores
def style(
@@ -195,3 +200,6 @@ def style(
if container is not None:
self.container = container
return self
+
+ def as_example(self, input_data):
+ return next((c[0] for c in self.choices if c[1] == input_data), None)
diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -10,6 +10,7 @@
import json.decoder
import os
import pkgutil
+import pprint
import random
import re
import time
@@ -175,6 +176,7 @@ def assert_configs_are_equivalent_besides_ids(
"""
config1 = copy.deepcopy(config1)
config2 = copy.deepcopy(config2)
+ pp = pprint.PrettyPrinter(indent=2)
for key in root_keys:
assert config1[key] == config2[key], f"Configs have different: {key}"
@@ -190,7 +192,7 @@ def assert_same_components(config1_id, config2_id):
c1.pop("id")
c2 = copy.deepcopy(c2)
c2.pop("id")
- assert c1 == c2, f"{c1} does not match {c2}"
+ assert c1 == c2, f"{pp.pprint(c1)} does not match {pp.pprint(c2)}"
def same_children_recursive(children1, chidren2):
for child1, child2 in zip(children1, chidren2):
| diff --git a/gradio/test_data/blocks_configs.py b/gradio/test_data/blocks_configs.py
--- a/gradio/test_data/blocks_configs.py
+++ b/gradio/test_data/blocks_configs.py
@@ -21,7 +21,11 @@
"id": 2,
"type": "checkboxgroup",
"props": {
- "choices": ["Covid", "Malaria", "Lung Cancer"],
+ "choices": [
+ ("Covid", "Covid"),
+ ("Malaria", "Malaria"),
+ ("Lung Cancer", "Lung Cancer"),
+ ],
"value": [],
"label": "Disease to Scan For",
"show_label": True,
@@ -35,7 +39,7 @@
"info": {"type": "array", "items": {"type": "string"}},
"serialized_info": False,
},
- "example_inputs": {"raw": "Covid", "serialized": "Covid"},
+ "example_inputs": {"raw": ["Covid"], "serialized": ["Covid"]},
},
{"id": 3, "type": "tabs", "props": {"visible": True}},
{"id": 4, "type": "tabitem", "props": {"label": "X-ray", "visible": True}},
@@ -346,7 +350,11 @@
"id": 7,
"type": "checkboxgroup",
"props": {
- "choices": ["Covid", "Malaria", "Lung Cancer"],
+ "choices": [
+ ("Covid", "Covid"),
+ ("Malaria", "Malaria"),
+ ("Lung Cancer", "Lung Cancer"),
+ ],
"value": [],
"label": "Disease to Scan For",
"show_label": True,
@@ -360,7 +368,7 @@
"info": {"type": "array", "items": {"type": "string"}},
"serialized_info": False,
},
- "example_inputs": {"raw": "Covid", "serialized": "Covid"},
+ "example_inputs": {"raw": ["Covid"], "serialized": ["Covid"]},
},
{"id": 8, "type": "tabs", "props": {"visible": True}},
{"id": 9, "type": "tabitem", "props": {"label": "X-ray", "visible": True}},
@@ -668,7 +676,11 @@
"id": 2,
"type": "checkboxgroup",
"props": {
- "choices": ["Covid", "Malaria", "Lung Cancer"],
+ "choices": [
+ ("Covid", "Covid"),
+ ("Malaria", "Malaria"),
+ ("Lung Cancer", "Lung Cancer"),
+ ],
"value": [],
"name": "checkboxgroup",
"show_label": True,
diff --git a/js/radio/Radio.test.ts b/js/radio/Radio.test.ts
--- a/js/radio/Radio.test.ts
+++ b/js/radio/Radio.test.ts
@@ -19,7 +19,11 @@ const loading_status = {
describe("Radio", () => {
afterEach(() => cleanup());
- const choices = ["dog", "cat", "turtle"];
+ const choices = [
+ ["dog", "dog"],
+ ["cat", "cat"],
+ ["turtle", "turtle"]
+ ];
test("renders provided value", async () => {
const { getAllByRole, getByTestId } = await render(Radio, {
@@ -31,17 +35,16 @@ describe("Radio", () => {
mode: "dynamic"
});
- const radioButtons: HTMLOptionElement[] = getAllByRole("radio");
-
assert.equal(
getByTestId("cat-radio-label").className.includes("selected"),
true
);
+ const radioButtons: HTMLOptionElement[] = getAllByRole("radio");
assert.equal(radioButtons.length, 3);
radioButtons.forEach((radioButton: HTMLOptionElement, index) => {
- assert.equal(radioButton.value === choices[index], true);
+ assert.equal(radioButton.value === choices[index][1], true);
});
});
diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -502,7 +502,7 @@ def test_component_functions(self):
label="Check Your Inputs",
)
assert checkboxes_input.get_config() == {
- "choices": ["a", "b", "c"],
+ "choices": [("a", "a"), ("b", "b"), ("c", "c")],
"value": ["a", "c"],
"name": "checkboxgroup",
"show_label": True,
@@ -548,7 +548,7 @@ def test_component_functions(self):
choices=["a", "b", "c"], default="a", label="Pick Your One Input"
)
assert radio_input.get_config() == {
- "choices": ["a", "b", "c"],
+ "choices": [("a", "a"), ("b", "b"), ("c", "c")],
"value": None,
"name": "radio",
"show_label": True,
@@ -697,7 +697,6 @@ def test_component_functions(self):
with pytest.raises(ValueError):
gr.Image(type="unknown")
image_input.shape = (30, 10)
- assert image_input._segment_by_slic(img) is not None
# Output functionalities
y_img = gr.processing_utils.decode_base64_to_image(
@@ -2036,9 +2035,9 @@ async def test_in_interface(self):
def get_avg_age_per_gender(data):
return {
- "M": int(data[data["gender"] == "M"].mean()),
- "F": int(data[data["gender"] == "F"].mean()),
- "O": int(data[data["gender"] == "O"].mean()),
+ "M": int(data[data["gender"] == "M"]["age"].mean()),
+ "F": int(data[data["gender"] == "F"]["age"].mean()),
+ "O": int(data[data["gender"] == "O"]["age"].mean()),
}
iface = gr.Interface(
@@ -2375,13 +2374,6 @@ def test_no_color(self):
assert config["encoding"]["x"]["field"] == "Horsepower"
assert config["encoding"]["x"]["title"] == "Horse"
assert config["encoding"]["y"]["field"] == "Miles_per_Gallon"
- assert config["selection"] == {
- "selector001": {
- "bind": "scales",
- "encodings": ["x", "y"],
- "type": "interval",
- }
- }
assert config["title"] == "Car Data"
assert "height" not in config
assert "width" not in config
| `gr.Radio` and `gr.CheckboxGroup` choices display text.
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
Usually for `gr.Radio` and `gr.CheckboxGroup`, the choices the program processes are different from the ones the UI should display to the user. For example, in a program it might be easier to have choices named like ["openai_completion", "openai_chat"], but it would be great to have the UI display them as ["OpenAI (Completion)", "OpenAI (Chat)"].
**Describe the solution you'd like**
It would be great if there could be an additional argument, e.g. `choices_display`, for `gr.Radio` and `gr.CheckboxGroup` that can be used specifically for UI display.
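For reference, the patch above takes a slightly different route than a separate `choices_display` argument: each choice may be a `(display_name, value)` tuple. A minimal usage sketch under that assumption (hypothetical demo code, not from the original issue):

```python
import gradio as gr

# (displayed name, value passed to / returned by the function)
backends = [
    ("OpenAI (Completion)", "openai_completion"),
    ("OpenAI (Chat)", "openai_chat"),
]

def describe(choice):
    # `choice` is the underlying value, e.g. "openai_chat", not the displayed label
    return f"Selected backend: {choice}"

with gr.Blocks() as demo:
    backend = gr.Radio(choices=backends, label="Backend")
    out = gr.Textbox(label="Result")
    backend.change(describe, inputs=backend, outputs=out)

demo.launch()
```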
| Similar to #3019, we should probably tackle them together | 2023-08-15T14:53:57 |
gradio-app/gradio | 5,235 | gradio-app__gradio-5235 | [
"5229"
] | 87f1c2b4ac7c685c43477215fa5b96b6cbeffa05 | diff --git a/gradio/components/video.py b/gradio/components/video.py
--- a/gradio/components/video.py
+++ b/gradio/components/video.py
@@ -239,6 +239,8 @@ def preprocess(
return output_file_name
elif not self.include_audio:
output_file_name = str(file_name.with_name(f"muted_{file_name.name}"))
+ if Path(output_file_name).exists():
+ return output_file_name
if wasm_utils.IS_WASM:
raise wasm_utils.WasmUnsupportedError(
"include_audio=False is not supported in the Wasm mode."
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -1332,6 +1332,11 @@ def test_component_functions(self):
output2 = video_input.preprocess(x_video)
assert output1 == output2
+ video_input = gr.Video(include_audio=False)
+ output1 = video_input.preprocess(x_video)
+ output2 = video_input.preprocess(x_video)
+ assert output1 == output2
+
video_input = gr.Video(label="Upload Your Video")
assert video_input.get_config() == {
"autoplay": False,
| [Video] Video component does not check if file already exists when `include_audio=False`
### Describe the bug
The video component does not check whether the file already exists when `include_audio=False`, which causes an error if the same video file is uploaded twice:
```
File '/tmp/gradio/87bf2b9d4da7170c8f8fe9673e7780effe24e254/muted_01.mp4' already exists. Overwrite? [y/N] Not overwriting - exiting
Traceback (most recent call last):
File "/home/artem.kotov/mambaforge/envs/control/lib/python3.10/site-packages/gradio/routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "/home/artem.kotov/mambaforge/envs/control/lib/python3.10/site-packages/gradio/blocks.py", line 1428, in process_api
inputs = self.preprocess_data(fn_index, inputs, state)
File "/home/artem.kotov/mambaforge/envs/control/lib/python3.10/site-packages/gradio/blocks.py", line 1245, in preprocess_data
processed_input.append(block.preprocess(inputs[i]))
File "/home/artem.kotov/mambaforge/envs/control/lib/python3.10/site-packages/gradio/components/video.py", line 250, in preprocess
ff.run()
File "/home/artem.kotov/mambaforge/envs/control/lib/python3.10/site-packages/ffmpy.py", line 106, in run
raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1])
ffmpy.FFRuntimeError: `ffmpeg -i /tmp/gradio/87bf2b9d4da7170c8f8fe9673e7780effe24e254/01.mp4 -an /home/artem.kotov/tmp/gradio/87bf2b9d4da7170c8f8fe9673e7780effe24e254/muted_01.mp4` exited with status 1
```
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```
import gradio as gr


def identity(input_video):
    return input_video


with gr.Blocks() as demo:
    input_video = gr.Video(source="upload", width=384, include_audio=False)
    result_video = gr.Video(label="Output", width=384)
    run_button = gr.Button(label="Run")
    run_button.click(fn=identity, inputs=[input_video], outputs=[result_video])

demo.queue().launch()
```
Then try to upload the same video twice: the first time everything works fine, but the second time the error above occurs.
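For reference, the patch above resolves this by making the preprocessing step idempotent; a standalone sketch of that caching pattern (hypothetical helper, not the actual `Video.preprocess` code):

```python
from pathlib import Path


def mute_video(input_path: str) -> str:
    """Return the path of the muted copy, re-encoding only if it does not exist yet."""
    src = Path(input_path)
    out = src.with_name(f"muted_{src.name}")
    if out.exists():
        # Repeat uploads of the same file reuse the cached output instead of invoking
        # ffmpeg again (which refuses to overwrite the existing file and raises).
        return str(out)
    out.write_bytes(src.read_bytes())  # placeholder for the real `ffmpeg -an` re-encode
    return str(out)
```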
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
Gradio Environment Information:
------------------------------
Operating System: Linux
gradio version: 3.40.1
gradio_client version: 0.4.0
------------------------------------------------
gradio dependencies in your environment:
aiofiles: 23.1.0
aiohttp: 3.8.4
altair: 5.0.1
fastapi: 0.98.0
ffmpy: 0.3.0
gradio-client: 0.4.0
httpx: 0.24.1
huggingface-hub: 0.15.1
importlib-resources: 6.0.1
jinja2: 3.1.2
markdown-it-py: 2.2.0
markupsafe: 2.1.1
matplotlib: 3.7.1
mdit-py-plugins: 0.3.3
numpy: 1.25.0
orjson: 3.9.1
packaging: 23.0
pandas: 1.5.3
pillow: 9.4.0
pydantic: 1.10.9
pydub: 0.25.1
python-multipart: 0.0.6
pyyaml: 6.0
requests: 2.31.0
semantic-version: 2.10.0
typing-extensions: 4.7.1
uvicorn: 0.22.0
websockets: 11.0.3
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.
gradio_client dependencies in your environment:
fsspec: 2023.6.0
httpx: 0.24.1
huggingface-hub: 0.15.1
packaging: 23.0
requests: 2.31.0
typing-extensions: 4.7.1
websockets: 11.0.3
```
### Severity
I can work around it
| Can confirm the issue, thanks @breengles! Would you be interested in opening a PR to fix this? | 2023-08-15T17:43:00 |
gradio-app/gradio | 5,254 | gradio-app__gradio-5254 | [
"5251"
] | c57d4c232a97e03b4671f9e9edc3af456438fe89 | diff --git a/gradio/components/checkboxgroup.py b/gradio/components/checkboxgroup.py
--- a/gradio/components/checkboxgroup.py
+++ b/gradio/components/checkboxgroup.py
@@ -133,6 +133,11 @@ def update(
interactive: bool | None = None,
visible: bool | None = None,
):
+ choices = (
+ None
+ if choices is None
+ else [c if isinstance(c, tuple) else (str(c), c) for c in choices]
+ )
return {
"choices": choices,
"label": label,
diff --git a/gradio/components/radio.py b/gradio/components/radio.py
--- a/gradio/components/radio.py
+++ b/gradio/components/radio.py
@@ -135,6 +135,11 @@ def update(
interactive: bool | None = None,
visible: bool | None = None,
):
+ choices = (
+ None
+ if choices is None
+ else [c if isinstance(c, tuple) else (str(c), c) for c in choices]
+ )
return {
"choices": choices,
"label": label,
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -581,6 +581,16 @@ async def test_in_interface(self):
scores = (await iface.interpret(["b"]))[0]["interpretation"]
assert scores == [-2.0, None, 2.0]
+ def test_update(self):
+ update = gr.Radio.update(
+ choices=[("zeroth", ""), "first", "second"], label="ordinal"
+ )
+ assert update["choices"] == [
+ ("zeroth", ""),
+ ("first", "first"),
+ ("second", "second"),
+ ]
+
class TestDropdown:
def test_component_functions(self):
| gr.CheckboxGroup.update(choices=..) not working as expected
### Describe the bug
This only happens on main and I believe it's because of the recent tuple addition in #5232
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```python
import gradio as gr
choices = ["First Choice", "Second Choice", "Third Choice"]
with gr.Blocks() as demo:
chx = gr.CheckboxGroup(choices=choices, interactive=True)
btn = gr.Button(value="Select All")
btn.click(lambda: gr.CheckboxGroup.update(choices=["first", "second"]), None, chx)
demo.launch()
```
### Screenshot
After clicking the button
<img width="1301" alt="image" src="https://github.com/gradio-app/gradio/assets/41651716/874fd613-1e22-4dfe-b788-83269f22b3b3">
### Logs
_No response_
### System Info
```shell
main
```
### Severity
Blocking usage of gradio
| ah yeah okay I know the issue -- thanks for flagging @freddyaboulton | 2023-08-17T18:46:32 |
gradio-app/gradio | 5,279 | gradio-app__gradio-5279 | [
"4841"
] | cf167cd1dd4acd9aee225ff1cb6fac0e849806ba | diff --git a/demo/event_trigger/run.py b/demo/event_trigger/run.py
--- a/demo/event_trigger/run.py
+++ b/demo/event_trigger/run.py
@@ -69,9 +69,13 @@ def video_pause():
def video_stop():
print("video_stop")
+ def video_end():
+ print("video_end")
+
video1.play(fn=video_play)
video1.pause(fn=video_pause)
video1.stop(fn=video_stop)
+ video1.end(fn=video_end)
radio1.change(fn=change_video, inputs=radio1, outputs=video1)
video1.change(fn=alert_change, inputs=[gr.State("Video"), video1])
diff --git a/demo/latex/run.py b/demo/latex/run.py
--- a/demo/latex/run.py
+++ b/demo/latex/run.py
@@ -13,8 +13,7 @@
the first is $\gamma^2 + \theta^2 = \omega^2$
- $\sqrt{x^2+1}
- $ is next
+ $\sqrt{x^2+1}$ is next
Integral $\int_{a}^{b} x^2 \,dx$ is last
diff --git a/demo/markdown_example/run.py b/demo/markdown_example/run.py
--- a/demo/markdown_example/run.py
+++ b/demo/markdown_example/run.py
@@ -168,6 +168,23 @@
127.0.0.1:8000
```
+```python
+import gradio as gr
+
+gr.Blocks() as demo:
+ gr.Markdown(value=md)
+
+demo.launch()
+```
+
+```js
+function fancyAlert(arg) {
+ if(arg) {
+ $.facebox({div:'#foo'})
+ }
+}
+```
+
## License
MIT
| diff --git a/js/app/test/components.test.ts b/js/app/test/components.test.ts
--- a/js/app/test/components.test.ts
+++ b/js/app/test/components.test.ts
@@ -240,11 +240,9 @@ describe("all components should have the appropriate label when set via the `lab
});
describe("all components should hide their label when `show_label=false`", () => {
- components
- .filter(([name]) => name !== "Markdown" && name !== "HTML")
- .forEach(([name, component, props]) => {
- test.todo(name);
- });
+ components.forEach(([name, component, props]) => {
+ test.todo(name);
+ });
["Button", "Code", "Image", "Plot"].forEach((name) => {
test.todo(name);
@@ -252,11 +250,9 @@ describe("all components should hide their label when `show_label=false`", () =>
});
describe("all components should show their label when `show_label=true`", () => {
- components
- .filter(([name]) => name !== "Markdown" && name !== "HTML")
- .forEach(([name, component, props]) => {
- test.todo(name);
- });
+ components.forEach(([name, component, props]) => {
+ test.todo(name);
+ });
["Button", "Code", "Image", "Plot"].forEach((name) => {
test.todo(name);
@@ -264,11 +260,9 @@ describe("all components should show their label when `show_label=true`", () =>
});
describe("all components should hide their container when `container=false`", () => {
- components
- .filter(([name]) => name !== "Markdown" && name !== "HTML")
- .forEach(([name, component, props]) => {
- test.todo(name);
- });
+ components.forEach(([name, component, props]) => {
+ test.todo(name);
+ });
["Button", "Code", "Image", "Plot"].forEach((name) => {
test.todo(name);
diff --git a/js/image/Image.test.ts b/js/image/Image.test.ts
--- a/js/image/Image.test.ts
+++ b/js/image/Image.test.ts
@@ -34,21 +34,25 @@ describe("Image", () => {
afterEach(() => cleanup());
test("image change event trigger fires when value is changed and only fires once", async () => {
- const { component } = await render(Image, {
+ const { component, listen } = await render(Image, {
show_label: true,
loading_status,
- mode: "dynamic",
value:
"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",
- root: "foo",
- root_url: null,
streaming: false,
pending: false,
- source: "upload"
+ source: "upload",
+ label: "Test Label",
+ width: 224,
+ height: 224,
+ mirror_webcam: false,
+ shape: [224, 224],
+ brush_color: "#000000",
+ brush_radius: 5,
+ mask_opacity: 0.5
});
- const mock = spy();
- component.$on("change", mock);
+ const mock = listen("change");
component.value =
"https://github.com/gradio-app/gradio/blob/main/test/test_files/cheetah1.jpg";
diff --git a/js/textbox/Textbox.test.ts b/js/textbox/Textbox.test.ts
--- a/js/textbox/Textbox.test.ts
+++ b/js/textbox/Textbox.test.ts
@@ -6,14 +6,15 @@ import event from "@testing-library/user-event";
import Textbox from "./interactive";
import type { LoadingStatus } from "@gradio/statustracker";
-const loading_status = {
+const loading_status: LoadingStatus = {
eta: 0,
queue_position: 1,
queue_size: 1,
status: "complete" as LoadingStatus["status"],
scroll_to_output: false,
visible: true,
- fn_index: 0
+ fn_index: 0,
+ show_progress: "full"
};
describe("Textbox", () => {
@@ -25,7 +26,6 @@ describe("Textbox", () => {
max_lines: 1,
loading_status,
lines: 1,
- mode: "dynamic",
value: "hello world",
label: "Textbox"
});
@@ -37,20 +37,18 @@ describe("Textbox", () => {
});
test("changing the text should update the value", async () => {
- const { component, getByDisplayValue } = await render(Textbox, {
+ const { component, getByDisplayValue, listen } = await render(Textbox, {
show_label: true,
max_lines: 10,
loading_status,
lines: 1,
- mode: "dynamic",
value: "hi ",
label: "Textbox"
});
const item: HTMLInputElement = getByDisplayValue("hi") as HTMLInputElement;
- const mock = spy();
- component.$on("change", mock);
+ const mock = listen("change");
item.focus();
await event.keyboard("some text");
@@ -58,6 +56,6 @@ describe("Textbox", () => {
assert.equal(item.value, "hi some text");
assert.equal(component.value, "hi some text");
assert.equal(mock.callCount, 9);
- assert.equal(mock.calls[8][0].detail, "hi some text");
+ assert.equal(mock.calls[8][0].detail.data, "hi some text");
});
});
diff --git a/js/uploadbutton/UploadButton.test.ts b/js/uploadbutton/UploadButton.test.ts
--- a/js/uploadbutton/UploadButton.test.ts
+++ b/js/uploadbutton/UploadButton.test.ts
@@ -25,7 +25,6 @@ describe("UploadButton", () => {
const { getByTestId } = await render(UploadButton, {
label: "file",
value: null,
- mode: "dynamic",
root: "http://localhost:7860",
file_count: "1"
});
@@ -47,18 +46,20 @@ describe("UploadButton", () => {
await import("@gradio/client");
setupi18n();
- const { component, getByTestId } = await render(UploadButton, {
- label: "file",
- value: null,
- mode: "dynamic",
- root: "http://localhost:7860",
- file_count: "1"
- });
+ const { component, getByTestId, wait_for_event } = await render(
+ UploadButton,
+ {
+ label: "file",
+ value: null,
+ root: "http://localhost:7860",
+ file_count: "1"
+ }
+ );
const item = getByTestId("file-upload-button"); //container.querySelectorAll("input")[0];
const file = new File(["hello"], "my-audio.wav", { type: "audio/wav" });
event.upload(item, file);
- const mock = await wait_for_event(component, "change");
+ const mock = await wait_for_event("change");
expect(mock.callCount).toBe(1);
const [data] = component.$capture_state().value;
expect(data).toBeTruthy();
diff --git a/js/video/Video.test.ts b/js/video/Video.test.ts
--- a/js/video/Video.test.ts
+++ b/js/video/Video.test.ts
@@ -55,7 +55,7 @@ describe("Video", () => {
name: "bar",
source: "upload"
});
- let vid = getByTestId("Test Label-player") as HTMLVideoElement
+ let vid = getByTestId("Test Label-player") as HTMLVideoElement;
assert.equal(
vid.src,
"https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
@@ -101,7 +101,7 @@ describe("Video", () => {
name: "bar",
source: "upload"
});
- let vid = getByTestId("test-player") as HTMLVideoElement
+ let vid = getByTestId("test-player") as HTMLVideoElement;
assert.equal(
vid.src,
"https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav"
@@ -252,7 +252,7 @@ describe("Video", () => {
});
test("video change event trigger fires when value is changed and only fires once", async () => {
- const { component } = await render(InteractiveVideo, {
+ const { component, listen } = await render(InteractiveVideo, {
show_label: true,
loading_status,
mode: "dynamic",
@@ -271,8 +271,7 @@ describe("Video", () => {
autoplay: true
});
- const mock = spy();
- component.$on("change", mock);
+ const mock = listen("change");
(component.value = [
{
diff --git a/test/test_files/xray_config.json b/test/test_files/xray_config.json
--- a/test/test_files/xray_config.json
+++ b/test/test_files/xray_config.json
@@ -1,476 +1,449 @@
{
- "version": "3.40.1",
- "mode": "blocks",
- "dev_mode": true,
- "analytics_enabled": true,
- "components": [
- {
- "id": 1,
- "type": "markdown",
- "props": {
- "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
- "rtl": false,
- "latex_delimiters": [
- {
- "left": "$",
- "right": "$",
- "display": false
- }
- ],
- "name": "markdown",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 2,
- "type": "checkboxgroup",
- "props": {
- "choices": [
- [
- "Covid",
- "Covid"
- ],
- [
- "Malaria",
- "Malaria"
- ],
- [
- "Lung Cancer",
- "Lung Cancer"
- ]
- ],
- "value": [],
- "label": "Disease to Scan For",
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "checkboxgroup",
- "visible": true
- },
- "serializer": "ListStringSerializable",
- "api_info": {
- "info": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": [
- "Covid"
- ],
- "serialized": [
- "Covid"
- ]
- }
- },
- {
- "id": 3,
- "type": "tabs",
- "props": {
- "visible": true
- }
- },
- {
- "id": 4,
- "type": "tabitem",
- "props": {
- "label": "X-ray",
- "visible": true
- }
- },
- {
- "id": 5,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 6,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 7,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 8,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 9,
- "type": "tabitem",
- "props": {
- "label": "CT Scan",
- "visible": true
- }
- },
- {
- "id": 10,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 11,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 12,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 13,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 14,
- "type": "textbox",
- "props": {
- "lines": 1,
- "max_lines": 20,
- "value": "",
- "type": "text",
- "autofocus": false,
- "show_copy_button": false,
- "container": true,
- "rtl": false,
- "show_label": true,
- "min_width": 160,
- "name": "textbox",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 15,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- },
- {
- "id": 16,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- }
- ],
- "css": null,
- "title": "Gradio",
- "space_id": null,
- "enable_queue": null,
- "show_error": true,
- "show_api": true,
- "is_colab": false,
- "stylesheets": [
- "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
- "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
- ],
- "theme": "default",
- "layout": {
- "id": 0,
- "children": [
- {
- "id": 1
- },
- {
- "id": 15,
- "children": [
- {
- "id": 2
- }
- ]
- },
- {
- "id": 3,
- "children": [
- {
- "id": 4,
- "children": [
- {
- "id": 5,
- "children": [
- {
- "id": 6
- },
- {
- "id": 7
- }
- ]
- },
- {
- "id": 8
- }
- ]
- },
- {
- "id": 9,
- "children": [
- {
- "id": 10,
- "children": [
- {
- "id": 11
- },
- {
- "id": 12
- }
- ]
- },
- {
- "id": 13
- }
- ]
- }
- ]
- },
- {
- "id": 16,
- "children": [
- {
- "id": 14
- }
- ]
- }
- ]
- },
- "dependencies": [
- {
- "targets": [
- 8
- ],
- "trigger": "click",
- "inputs": [
- 2,
- 6
- ],
- "outputs": [
- 7
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- },
- {
- "targets": [
- 13
- ],
- "trigger": "click",
- "inputs": [
- 2,
- 11
- ],
- "outputs": [
- 12
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- }
- ]
-}
\ No newline at end of file
+ "version": "3.40.1",
+ "mode": "blocks",
+ "dev_mode": true,
+ "analytics_enabled": true,
+ "components": [
+ {
+ "id": 1,
+ "type": "markdown",
+ "props": {
+ "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
+ "rtl": false,
+ "latex_delimiters": [
+ {
+ "left": "$",
+ "right": "$",
+ "display": false
+ }
+ ],
+ "name": "markdown",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 2,
+ "type": "checkboxgroup",
+ "props": {
+ "choices": [
+ ["Covid", "Covid"],
+ ["Malaria", "Malaria"],
+ ["Lung Cancer", "Lung Cancer"]
+ ],
+ "value": [],
+ "label": "Disease to Scan For",
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "checkboxgroup",
+ "visible": true
+ },
+ "serializer": "ListStringSerializable",
+ "api_info": {
+ "info": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": ["Covid"],
+ "serialized": ["Covid"]
+ }
+ },
+ {
+ "id": 3,
+ "type": "tabs",
+ "props": {
+ "visible": true
+ }
+ },
+ {
+ "id": 4,
+ "type": "tabitem",
+ "props": {
+ "label": "X-ray",
+ "visible": true
+ }
+ },
+ {
+ "id": 5,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 6,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 7,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 8,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 9,
+ "type": "tabitem",
+ "props": {
+ "label": "CT Scan",
+ "visible": true
+ }
+ },
+ {
+ "id": 10,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 11,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 12,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 13,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 14,
+ "type": "textbox",
+ "props": {
+ "lines": 1,
+ "max_lines": 20,
+ "value": "",
+ "type": "text",
+ "autofocus": false,
+ "show_copy_button": false,
+ "container": true,
+ "rtl": false,
+ "show_label": true,
+ "min_width": 160,
+ "name": "textbox",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 15,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ },
+ {
+ "id": 16,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ }
+ ],
+ "css": null,
+ "title": "Gradio",
+ "space_id": null,
+ "enable_queue": null,
+ "show_error": true,
+ "show_api": true,
+ "is_colab": false,
+ "stylesheets": [
+ "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
+ "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
+ ],
+ "theme": "default",
+ "layout": {
+ "id": 0,
+ "children": [
+ {
+ "id": 1
+ },
+ {
+ "id": 15,
+ "children": [
+ {
+ "id": 2
+ }
+ ]
+ },
+ {
+ "id": 3,
+ "children": [
+ {
+ "id": 4,
+ "children": [
+ {
+ "id": 5,
+ "children": [
+ {
+ "id": 6
+ },
+ {
+ "id": 7
+ }
+ ]
+ },
+ {
+ "id": 8
+ }
+ ]
+ },
+ {
+ "id": 9,
+ "children": [
+ {
+ "id": 10,
+ "children": [
+ {
+ "id": 11
+ },
+ {
+ "id": 12
+ }
+ ]
+ },
+ {
+ "id": 13
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": 16,
+ "children": [
+ {
+ "id": 14
+ }
+ ]
+ }
+ ]
+ },
+ "dependencies": [
+ {
+ "targets": [8],
+ "trigger": "click",
+ "inputs": [2, 6],
+ "outputs": [7],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ },
+ {
+ "targets": [13],
+ "trigger": "click",
+ "inputs": [2, 11],
+ "outputs": [12],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ }
+ ]
+}
diff --git a/test/test_files/xray_config_diff_ids.json b/test/test_files/xray_config_diff_ids.json
--- a/test/test_files/xray_config_diff_ids.json
+++ b/test/test_files/xray_config_diff_ids.json
@@ -1,476 +1,449 @@
{
- "version": "3.40.1",
- "mode": "blocks",
- "dev_mode": true,
- "analytics_enabled": true,
- "components": [
- {
- "id": 101,
- "type": "markdown",
- "props": {
- "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
- "rtl": false,
- "latex_delimiters": [
- {
- "left": "$",
- "right": "$",
- "display": false
- }
- ],
- "name": "markdown",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 102,
- "type": "checkboxgroup",
- "props": {
- "choices": [
- [
- "Covid",
- "Covid"
- ],
- [
- "Malaria",
- "Malaria"
- ],
- [
- "Lung Cancer",
- "Lung Cancer"
- ]
- ],
- "value": [],
- "label": "Disease to Scan For",
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "checkboxgroup",
- "visible": true
- },
- "serializer": "ListStringSerializable",
- "api_info": {
- "info": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": [
- "Covid"
- ],
- "serialized": [
- "Covid"
- ]
- }
- },
- {
- "id": 103,
- "type": "tabs",
- "props": {
- "visible": true
- }
- },
- {
- "id": 104,
- "type": "tabitem",
- "props": {
- "label": "X-ray",
- "visible": true
- }
- },
- {
- "id": 105,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 106,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 107,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 108,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 109,
- "type": "tabitem",
- "props": {
- "label": "CT Scan",
- "visible": true
- }
- },
- {
- "id": 110,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 111,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 112,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 113,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 114,
- "type": "textbox",
- "props": {
- "lines": 1,
- "max_lines": 20,
- "value": "",
- "type": "text",
- "autofocus": false,
- "show_copy_button": false,
- "container": true,
- "rtl": false,
- "show_label": true,
- "min_width": 160,
- "name": "textbox",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 115,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- },
- {
- "id": 116,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- }
- ],
- "css": null,
- "title": "Gradio",
- "space_id": null,
- "enable_queue": null,
- "show_error": true,
- "show_api": true,
- "is_colab": false,
- "stylesheets": [
- "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
- "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
- ],
- "theme": "default",
- "layout": {
- "id": 100,
- "children": [
- {
- "id": 101
- },
- {
- "id": 115,
- "children": [
- {
- "id": 102
- }
- ]
- },
- {
- "id": 103,
- "children": [
- {
- "id": 104,
- "children": [
- {
- "id": 105,
- "children": [
- {
- "id": 106
- },
- {
- "id": 107
- }
- ]
- },
- {
- "id": 108
- }
- ]
- },
- {
- "id": 109,
- "children": [
- {
- "id": 110,
- "children": [
- {
- "id": 111
- },
- {
- "id": 112
- }
- ]
- },
- {
- "id": 113
- }
- ]
- }
- ]
- },
- {
- "id": 116,
- "children": [
- {
- "id": 114
- }
- ]
- }
- ]
- },
- "dependencies": [
- {
- "targets": [
- 108
- ],
- "trigger": "click",
- "inputs": [
- 102,
- 106
- ],
- "outputs": [
- 107
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- },
- {
- "targets": [
- 113
- ],
- "trigger": "click",
- "inputs": [
- 102,
- 111
- ],
- "outputs": [
- 112
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- }
- ]
-}
\ No newline at end of file
+ "version": "3.40.1",
+ "mode": "blocks",
+ "dev_mode": true,
+ "analytics_enabled": true,
+ "components": [
+ {
+ "id": 101,
+ "type": "markdown",
+ "props": {
+ "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
+ "rtl": false,
+ "latex_delimiters": [
+ {
+ "left": "$",
+ "right": "$",
+ "display": false
+ }
+ ],
+ "name": "markdown",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 102,
+ "type": "checkboxgroup",
+ "props": {
+ "choices": [
+ ["Covid", "Covid"],
+ ["Malaria", "Malaria"],
+ ["Lung Cancer", "Lung Cancer"]
+ ],
+ "value": [],
+ "label": "Disease to Scan For",
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "checkboxgroup",
+ "visible": true
+ },
+ "serializer": "ListStringSerializable",
+ "api_info": {
+ "info": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": ["Covid"],
+ "serialized": ["Covid"]
+ }
+ },
+ {
+ "id": 103,
+ "type": "tabs",
+ "props": {
+ "visible": true
+ }
+ },
+ {
+ "id": 104,
+ "type": "tabitem",
+ "props": {
+ "label": "X-ray",
+ "visible": true
+ }
+ },
+ {
+ "id": 105,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 106,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 107,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 108,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 109,
+ "type": "tabitem",
+ "props": {
+ "label": "CT Scan",
+ "visible": true
+ }
+ },
+ {
+ "id": 110,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 111,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 112,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 113,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 114,
+ "type": "textbox",
+ "props": {
+ "lines": 1,
+ "max_lines": 20,
+ "value": "",
+ "type": "text",
+ "autofocus": false,
+ "show_copy_button": false,
+ "container": true,
+ "rtl": false,
+ "show_label": true,
+ "min_width": 160,
+ "name": "textbox",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 115,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ },
+ {
+ "id": 116,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ }
+ ],
+ "css": null,
+ "title": "Gradio",
+ "space_id": null,
+ "enable_queue": null,
+ "show_error": true,
+ "show_api": true,
+ "is_colab": false,
+ "stylesheets": [
+ "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
+ "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
+ ],
+ "theme": "default",
+ "layout": {
+ "id": 100,
+ "children": [
+ {
+ "id": 101
+ },
+ {
+ "id": 115,
+ "children": [
+ {
+ "id": 102
+ }
+ ]
+ },
+ {
+ "id": 103,
+ "children": [
+ {
+ "id": 104,
+ "children": [
+ {
+ "id": 105,
+ "children": [
+ {
+ "id": 106
+ },
+ {
+ "id": 107
+ }
+ ]
+ },
+ {
+ "id": 108
+ }
+ ]
+ },
+ {
+ "id": 109,
+ "children": [
+ {
+ "id": 110,
+ "children": [
+ {
+ "id": 111
+ },
+ {
+ "id": 112
+ }
+ ]
+ },
+ {
+ "id": 113
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": 116,
+ "children": [
+ {
+ "id": 114
+ }
+ ]
+ }
+ ]
+ },
+ "dependencies": [
+ {
+ "targets": [108],
+ "trigger": "click",
+ "inputs": [102, 106],
+ "outputs": [107],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ },
+ {
+ "targets": [113],
+ "trigger": "click",
+ "inputs": [102, 111],
+ "outputs": [112],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ }
+ ]
+}
diff --git a/test/test_files/xray_config_wrong.json b/test/test_files/xray_config_wrong.json
--- a/test/test_files/xray_config_wrong.json
+++ b/test/test_files/xray_config_wrong.json
@@ -1,548 +1,519 @@
{
- "version": "3.40.1",
- "mode": "blocks",
- "dev_mode": true,
- "analytics_enabled": true,
- "components": [
- {
- "id": 1,
- "type": "markdown",
- "props": {
- "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
- "rtl": false,
- "latex_delimiters": [
- {
- "left": "$",
- "right": "$",
- "display": false
- }
- ],
- "name": "markdown",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 2,
- "type": "checkboxgroup",
- "props": {
- "choices": [
- [
- "Covid",
- "Covid"
- ],
- [
- "Malaria",
- "Malaria"
- ],
- [
- "Lung Cancer",
- "Lung Cancer"
- ]
- ],
- "value": [],
- "label": "Disease to Scan For",
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "checkboxgroup",
- "visible": true
- },
- "serializer": "ListStringSerializable",
- "api_info": {
- "info": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": [
- "Covid"
- ],
- "serialized": [
- "Covid"
- ]
- }
- },
- {
- "id": 3,
- "type": "tabs",
- "props": {
- "visible": true
- }
- },
- {
- "id": 4,
- "type": "tabitem",
- "props": {
- "label": "X-ray",
- "visible": true
- }
- },
- {
- "id": 5,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 6,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 7,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 8,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 9,
- "type": "tabitem",
- "props": {
- "label": "CT Scan",
- "visible": true
- }
- },
- {
- "id": 10,
- "type": "row",
- "props": {
- "type": "row",
- "variant": "default",
- "equal_height": true,
- "visible": true
- }
- },
- {
- "id": 11,
- "type": "image",
- "props": {
- "image_mode": "RGB",
- "source": "upload",
- "tool": "editor",
- "streaming": false,
- "mirror_webcam": true,
- "brush_color": "#000000",
- "mask_opacity": 0.7,
- "selectable": false,
- "show_share_button": false,
- "show_download_button": true,
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "image",
- "visible": true
- },
- "serializer": "ImgSerializable",
- "api_info": {
- "info": {
- "type": "string",
- "description": "base64 representation of an image"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
- "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
- }
- },
- {
- "id": 12,
- "type": "json",
- "props": {
- "show_label": true,
- "container": true,
- "min_width": 160,
- "name": "json",
- "visible": true
- },
- "serializer": "JSONSerializable",
- "api_info": {
- "info": {
- "type": {},
- "description": "any valid json"
- },
- "serialized_info": true
- },
- "example_inputs": {
- "raw": {
- "a": 1,
- "b": 2
- },
- "serialized": null
- }
- },
- {
- "id": 13,
- "type": "button",
- "props": {
- "value": "Run",
- "variant": "secondary",
- "interactive": true,
- "name": "button",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 14,
- "type": "textbox",
- "props": {
- "lines": 1,
- "max_lines": 20,
- "value": "",
- "type": "text",
- "autofocus": false,
- "show_copy_button": false,
- "container": true,
- "rtl": false,
- "show_label": true,
- "min_width": 160,
- "name": "textbox",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 15,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- },
- {
- "id": 16,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- },
- {
- "id": 118,
- "type": "textbox",
- "props": {
- "lines": 1,
- "max_lines": 20,
- "value": "",
- "type": "text",
- "autofocus": false,
- "show_copy_button": false,
- "container": true,
- "rtl": false,
- "show_label": true,
- "min_width": 160,
- "name": "textbox",
- "visible": true
- },
- "serializer": "StringSerializable",
- "api_info": {
- "info": {
- "type": "string"
- },
- "serialized_info": false
- },
- "example_inputs": {
- "raw": "Howdy!",
- "serialized": "Howdy!"
- }
- },
- {
- "id": 119,
- "type": "form",
- "props": {
- "type": "form",
- "scale": 0,
- "min_width": 0,
- "visible": true
- }
- }
- ],
- "css": null,
- "title": "Gradio",
- "space_id": null,
- "enable_queue": null,
- "show_error": true,
- "show_api": true,
- "is_colab": false,
- "stylesheets": [
- "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
- "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
- ],
- "theme": "default",
- "layout": {
- "id": 117,
- "children": [
- {
- "id": 1
- },
- {
- "id": 15,
- "children": [
- {
- "id": 2
- }
- ]
- },
- {
- "id": 3,
- "children": [
- {
- "id": 4,
- "children": [
- {
- "id": 5,
- "children": [
- {
- "id": 6
- },
- {
- "id": 7
- }
- ]
- },
- {
- "id": 8
- }
- ]
- },
- {
- "id": 9,
- "children": [
- {
- "id": 10,
- "children": [
- {
- "id": 11
- },
- {
- "id": 12
- }
- ]
- },
- {
- "id": 13
- }
- ]
- }
- ]
- },
- {
- "id": 16,
- "children": [
- {
- "id": 14
- }
- ]
- },
- {
- "id": 119,
- "children": [
- {
- "id": 118
- }
- ]
- }
- ]
- },
- "dependencies": [
- {
- "targets": [
- 8
- ],
- "trigger": "click",
- "inputs": [
- 2,
- 6
- ],
- "outputs": [
- 7
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- },
- {
- "targets": [
- 13
- ],
- "trigger": "click",
- "inputs": [
- 2,
- 11
- ],
- "outputs": [
- 12
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- },
- {
- "targets": [],
- "trigger": "load",
- "inputs": [],
- "outputs": [
- 118
- ],
- "backend_fn": true,
- "js": null,
- "queue": null,
- "api_name": null,
- "scroll_to_output": false,
- "show_progress": "full",
- "every": null,
- "batch": false,
- "max_batch_size": 4,
- "cancels": [],
- "types": {
- "continuous": false,
- "generator": false
- },
- "collects_event_data": false,
- "trigger_after": null,
- "trigger_only_on_success": false
- }
- ]
-}
\ No newline at end of file
+ "version": "3.40.1",
+ "mode": "blocks",
+ "dev_mode": true,
+ "analytics_enabled": true,
+ "components": [
+ {
+ "id": 1,
+ "type": "markdown",
+ "props": {
+ "value": "# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2",
+ "rtl": false,
+ "latex_delimiters": [
+ {
+ "left": "$",
+ "right": "$",
+ "display": false
+ }
+ ],
+ "name": "markdown",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 2,
+ "type": "checkboxgroup",
+ "props": {
+ "choices": [
+ ["Covid", "Covid"],
+ ["Malaria", "Malaria"],
+ ["Lung Cancer", "Lung Cancer"]
+ ],
+ "value": [],
+ "label": "Disease to Scan For",
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "checkboxgroup",
+ "visible": true
+ },
+ "serializer": "ListStringSerializable",
+ "api_info": {
+ "info": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": ["Covid"],
+ "serialized": ["Covid"]
+ }
+ },
+ {
+ "id": 3,
+ "type": "tabs",
+ "props": {
+ "visible": true
+ }
+ },
+ {
+ "id": 4,
+ "type": "tabitem",
+ "props": {
+ "label": "X-ray",
+ "visible": true
+ }
+ },
+ {
+ "id": 5,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 6,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 7,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 8,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 9,
+ "type": "tabitem",
+ "props": {
+ "label": "CT Scan",
+ "visible": true
+ }
+ },
+ {
+ "id": 10,
+ "type": "row",
+ "props": {
+ "type": "row",
+ "variant": "default",
+ "equal_height": true,
+ "visible": true
+ }
+ },
+ {
+ "id": 11,
+ "type": "image",
+ "props": {
+ "image_mode": "RGB",
+ "source": "upload",
+ "tool": "editor",
+ "streaming": false,
+ "mirror_webcam": true,
+ "brush_color": "#000000",
+ "mask_opacity": 0.7,
+ "selectable": false,
+ "show_share_button": false,
+ "show_download_button": true,
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "image",
+ "visible": true
+ },
+ "serializer": "ImgSerializable",
+ "api_info": {
+ "info": {
+ "type": "string",
+ "description": "base64 representation of an image"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": "data:image/png;base64,R0lGODlhPQBEAPeoAJosM//AwO/AwHVYZ/z595kzAP/s7P+goOXMv8+fhw/v739/f+8PD98fH/8mJl+fn/9ZWb8/PzWlwv///6wWGbImAPgTEMImIN9gUFCEm/gDALULDN8PAD6atYdCTX9gUNKlj8wZAKUsAOzZz+UMAOsJAP/Z2ccMDA8PD/95eX5NWvsJCOVNQPtfX/8zM8+QePLl38MGBr8JCP+zs9myn/8GBqwpAP/GxgwJCPny78lzYLgjAJ8vAP9fX/+MjMUcAN8zM/9wcM8ZGcATEL+QePdZWf/29uc/P9cmJu9MTDImIN+/r7+/vz8/P8VNQGNugV8AAF9fX8swMNgTAFlDOICAgPNSUnNWSMQ5MBAQEJE3QPIGAM9AQMqGcG9vb6MhJsEdGM8vLx8fH98AANIWAMuQeL8fABkTEPPQ0OM5OSYdGFl5jo+Pj/+pqcsTE78wMFNGQLYmID4dGPvd3UBAQJmTkP+8vH9QUK+vr8ZWSHpzcJMmILdwcLOGcHRQUHxwcK9PT9DQ0O/v70w5MLypoG8wKOuwsP/g4P/Q0IcwKEswKMl8aJ9fX2xjdOtGRs/Pz+Dg4GImIP8gIH0sKEAwKKmTiKZ8aB/f39Wsl+LFt8dgUE9PT5x5aHBwcP+AgP+WltdgYMyZfyywz78AAAAAAAD///8AAP9mZv///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAKgALAAAAAA9AEQAAAj/AFEJHEiwoMGDCBMqXMiwocAbBww4nEhxoYkUpzJGrMixogkfGUNqlNixJEIDB0SqHGmyJSojM1bKZOmyop0gM3Oe2liTISKMOoPy7GnwY9CjIYcSRYm0aVKSLmE6nfq05QycVLPuhDrxBlCtYJUqNAq2bNWEBj6ZXRuyxZyDRtqwnXvkhACDV+euTeJm1Ki7A73qNWtFiF+/gA95Gly2CJLDhwEHMOUAAuOpLYDEgBxZ4GRTlC1fDnpkM+fOqD6DDj1aZpITp0dtGCDhr+fVuCu3zlg49ijaokTZTo27uG7Gjn2P+hI8+PDPERoUB318bWbfAJ5sUNFcuGRTYUqV/3ogfXp1rWlMc6awJjiAAd2fm4ogXjz56aypOoIde4OE5u/F9x199dlXnnGiHZWEYbGpsAEA3QXYnHwEFliKAgswgJ8LPeiUXGwedCAKABACCN+EA1pYIIYaFlcDhytd51sGAJbo3onOpajiihlO92KHGaUXGwWjUBChjSPiWJuOO/LYIm4v1tXfE6J4gCSJEZ7YgRYUNrkji9P55sF/ogxw5ZkSqIDaZBV6aSGYq/lGZplndkckZ98xoICbTcIJGQAZcNmdmUc210hs35nCyJ58fgmIKX5RQGOZowxaZwYA+JaoKQwswGijBV4C6SiTUmpphMspJx9unX4KaimjDv9aaXOEBteBqmuuxgEHoLX6Kqx+yXqqBANsgCtit4FWQAEkrNbpq7HSOmtwag5w57GrmlJBASEU18ADjUYb3ADTinIttsgSB1oJFfA63bduimuqKB1keqwUhoCSK374wbujvOSu4QG6UvxBRydcpKsav++Ca6G8A6Pr1x2kVMyHwsVxUALDq/krnrhPSOzXG1lUTIoffqGR7Goi2MAxbv6O2kEG56I7CSlRsEFKFVyovDJoIRTg7sugNRDGqCJzJgcKE0ywc0ELm6KBCCJo8DIPFeCWNGcyqNFE06ToAfV0HBRgxsvLThHn1oddQMrXj5DyAQgjEHSAJMWZwS3HPxT/QMbabI/iBCliMLEJKX2EEkomBAUCxRi42VDADxyTYDVogV+wSChqmKxEKCDAYFDFj4OmwbY7bDGdBhtrnTQYOigeChUmc1K3QTnAUfEgGFgAWt88hKA6aCRIXhxnQ1yg3BCayK44EWdkUQcBByEQChFXfCB776aQsG0BIlQgQgE8qO26X1h8cEUep8ngRBnOy74E9QgRgEAC8SvOfQkh7FDBDmS43PmGoIiKUUEGkMEC/PJHgxw0xH74yx/3XnaYRJgMB8obxQW6kL9QYEJ0FIFgByfIL7/IQAlvQwEpnAC7DtLNJCKUoO/w45c44GwCXiAFB/OXAATQryUxdN4LfFiwgjCNYg+kYMIEFkCKDs6PKAIJouyGWMS1FSKJOMRB/BoIxYJIUXFUxNwoIkEKPAgCBZSQHQ1A2EWDfDEUVLyADj5AChSIQW6gu10bE/JG2VnCZGfo4R4d0sdQoBAHhPjhIB94v/wRoRKQWGRHgrhGSQJxCS+0pCZbEhAAOw==",
+ "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
+ }
+ },
+ {
+ "id": 12,
+ "type": "json",
+ "props": {
+ "show_label": true,
+ "container": true,
+ "min_width": 160,
+ "name": "json",
+ "visible": true
+ },
+ "serializer": "JSONSerializable",
+ "api_info": {
+ "info": {
+ "type": {},
+ "description": "any valid json"
+ },
+ "serialized_info": true
+ },
+ "example_inputs": {
+ "raw": {
+ "a": 1,
+ "b": 2
+ },
+ "serialized": null
+ }
+ },
+ {
+ "id": 13,
+ "type": "button",
+ "props": {
+ "value": "Run",
+ "variant": "secondary",
+ "interactive": true,
+ "name": "button",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 14,
+ "type": "textbox",
+ "props": {
+ "lines": 1,
+ "max_lines": 20,
+ "value": "",
+ "type": "text",
+ "autofocus": false,
+ "show_copy_button": false,
+ "container": true,
+ "rtl": false,
+ "show_label": true,
+ "min_width": 160,
+ "name": "textbox",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 15,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ },
+ {
+ "id": 16,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ },
+ {
+ "id": 118,
+ "type": "textbox",
+ "props": {
+ "lines": 1,
+ "max_lines": 20,
+ "value": "",
+ "type": "text",
+ "autofocus": false,
+ "show_copy_button": false,
+ "container": true,
+ "rtl": false,
+ "show_label": true,
+ "min_width": 160,
+ "name": "textbox",
+ "visible": true
+ },
+ "serializer": "StringSerializable",
+ "api_info": {
+ "info": {
+ "type": "string"
+ },
+ "serialized_info": false
+ },
+ "example_inputs": {
+ "raw": "Howdy!",
+ "serialized": "Howdy!"
+ }
+ },
+ {
+ "id": 119,
+ "type": "form",
+ "props": {
+ "type": "form",
+ "scale": 0,
+ "min_width": 0,
+ "visible": true
+ }
+ }
+ ],
+ "css": null,
+ "title": "Gradio",
+ "space_id": null,
+ "enable_queue": null,
+ "show_error": true,
+ "show_api": true,
+ "is_colab": false,
+ "stylesheets": [
+ "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap",
+ "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"
+ ],
+ "theme": "default",
+ "layout": {
+ "id": 117,
+ "children": [
+ {
+ "id": 1
+ },
+ {
+ "id": 15,
+ "children": [
+ {
+ "id": 2
+ }
+ ]
+ },
+ {
+ "id": 3,
+ "children": [
+ {
+ "id": 4,
+ "children": [
+ {
+ "id": 5,
+ "children": [
+ {
+ "id": 6
+ },
+ {
+ "id": 7
+ }
+ ]
+ },
+ {
+ "id": 8
+ }
+ ]
+ },
+ {
+ "id": 9,
+ "children": [
+ {
+ "id": 10,
+ "children": [
+ {
+ "id": 11
+ },
+ {
+ "id": 12
+ }
+ ]
+ },
+ {
+ "id": 13
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "id": 16,
+ "children": [
+ {
+ "id": 14
+ }
+ ]
+ },
+ {
+ "id": 119,
+ "children": [
+ {
+ "id": 118
+ }
+ ]
+ }
+ ]
+ },
+ "dependencies": [
+ {
+ "targets": [8],
+ "trigger": "click",
+ "inputs": [2, 6],
+ "outputs": [7],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ },
+ {
+ "targets": [13],
+ "trigger": "click",
+ "inputs": [2, 11],
+ "outputs": [12],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ },
+ {
+ "targets": [],
+ "trigger": "load",
+ "inputs": [],
+ "outputs": [118],
+ "backend_fn": true,
+ "js": null,
+ "queue": null,
+ "api_name": null,
+ "scroll_to_output": false,
+ "show_progress": "full",
+ "every": null,
+ "batch": false,
+ "max_batch_size": 4,
+ "cancels": [],
+ "types": {
+ "continuous": false,
+ "generator": false
+ },
+ "collects_event_data": false,
+ "trigger_after": null,
+ "trigger_only_on_success": false
+ }
+ ]
+}
| Gradio starting with 3.36 is extremely slow
### Describe the bug
I just upgraded my app from Gradio 3.32.0 to 3.36.1 (I cannot use the versions in between due to a breaking change that was only fixed after 3.35.2) and browser page load time grew from 5 sec to 15 sec.
Yes, it's a highly complex app with too many controls, but a 3x slowdown is not what I would expect.
Once the page load completes, actual functionality is on par and I don't see any errors.
Note that asset downloads are not the issue; they all complete quickly (as seen in the browser inspector's network tab). It is the actual building of the UI that is slow: for example, the JS code inside `http://127.0.0.1:7860/assets/Blocks-7b1f1558.js` takes 5 sec to execute on its own.
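To make that observation reproducible, here is a minimal sketch (not part of the app itself) that can be pasted into the browser console before reloading; it logs main-thread long tasks, which is where multi-second JS execution like the above shows up. The 50 ms threshold is the Long Tasks API's definition, and `longtask` entries are currently Chromium-only.

```ts
// Sketch: confirm that main-thread script execution, not network, dominates
// page load by logging long tasks (main-thread blocks of 50 ms or more).
const observer = new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    console.log(
      `long task: ${entry.duration.toFixed(0)} ms starting at ${entry.startTime.toFixed(0)} ms`
    );
  }
});
observer.observe({ type: "longtask", buffered: true });
```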
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
1. Install [SD.Next](https://github.com/vladmandic/automatic) and start it using `webui.sh`/`webui.bat`
(the first start will install a lot of dependencies, so it may take a while)
2. Change `requirements.txt` to use the new Gradio version and start again
### Screenshot
_No response_
### Logs
```shell
No error messages in the server console or the browser console
```
### System Info
```shell
Server Gradio 3.36.1 on Ubuntu 22.04
Client Chrome 114 on Windows 11
```
### Severity
Blocking usage of gradio
Nothing major has changed recently, so this is somewhat surprising, although I do have a slight suspicion about what this might be. I'll investigate and report back!
I know what this is now and I'll fix it as part of #3261.
The issue is two-fold.
1. In complex apps with large numbers of components, every component is processed at app startup, regardless of how many are in view at any time.
2. We programmatically attach events. For 'normal' events we know which ones the user has applied (`btn.click()` etc.), so the impact is minimal (though not zero), but we also use events to handle errors dispatched from components, and we almost always need to know about these, so we need to attach the listeners. Attaching listeners to hundreds or thousands of components is very expensive.
1\. is a multi-faceted issue and involves substantial changes to fix entirely.
- The first step is to ensure we only render things if they are visible (inactive tabs and `visible=false` components should not be rendered). This _used_ to be the behaviour, but we changed it because we only store the props (i.e. state as gradio-python understands it) in memory; we do not store the state necessary to rehydrate all components exactly as they currently are. This means switching tabs resets state, for example, and we don't have sufficient data to recreate it exactly as it was. This will be taken care of at some point soon, can be done independently of gradio-python, and requires no breaking changes (although it isn't trivial).
- The ideal scenario is that the config is fully dynamic and can be updated at any time, rerendering the UI. This is a more substantial change because it will introduce a number of new challenges, none of which are insurmountable. It will probably involve breaking changes, or a significant enough internal refactor that it makes sense to release it as part of 4.0, if we decide to do it.
2\. This can be resolved fairly simply by changing how we listen for events (a rough sketch of one such approach follows below). I have a few ideas that I think will work but will need some testing. I will also look at refactoring other similar code to improve performance across the board. This _should_ result in a significant improvement to startup time for _all_ apps, but especially complex ones with many components. It will also simplify some of our initialisation code, although there are tradeoffs.
I'll take care of this as I'm working on the refactors over the next few weeks.
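As a concrete illustration of "changing how we listen for events", here is a minimal event-delegation sketch in TypeScript. It is not Gradio's actual code, and delegation is only one candidate technique; the `data-component-id` attribute and the handler registry are assumptions made for this example. The idea is that a single listener on a root element can service any number of components, so listener setup no longer scales with component count.

```ts
// Minimal event-delegation sketch (illustrative only, not Gradio internals).
// One listener on the root services every component, however many exist.

type Handler = (event: Event) => void;

// Registry of per-component handlers, keyed by a hypothetical data attribute.
const handlers = new Map<string, Handler>();

export function registerHandler(componentId: string, handler: Handler): void {
  handlers.set(componentId, handler);
}

export function unregisterHandler(componentId: string): void {
  handlers.delete(componentId);
}

export function attachDelegatedListener(root: HTMLElement, eventName: string): void {
  // A single addEventListener call, regardless of how many components exist.
  root.addEventListener(eventName, (event) => {
    const target = event.target as HTMLElement | null;
    // Walk up from the event target to the owning component wrapper, if any.
    const componentEl = target?.closest<HTMLElement>("[data-component-id]");
    if (!componentEl) return;
    const handler = handlers.get(componentEl.dataset.componentId ?? "");
    handler?.(event);
  });
}
```

With this pattern, registering a handler is a Map insertion rather than a DOM `addEventListener` call, and only one DOM listener per event type exists on the page.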
Any update on this issue? It has been open without any updates for over a month.
Gradio 3.40 was just released and page load is so slow that the browser reports "page is not responding" and asks to kill it.
In the meantime I'm stuck on Gradio 3.32, since the other blocking issue I had was only resolved in >=3.36.
Also note it's confirmed this has the same impact on multiple apps, so this is turning into a big thing - I don't think it can be buried any longer under "technical debt" - it's a massive regression.
cc @pngwn @abidlabs
pngwn mentioned here that it would be worked on this week: https://github.com/gradio-app/gradio/pull/4651#issuecomment-1668483459
> I know what this is now and I'll fix it as part of #3261.
>
> The issue is two-fold.
>
> 1. Complex apps with large numbers of components are all processed at app startup, regardless of how many are on view at any time.
> 2. We programmatically attach events. For 'normal' events we know which ones the user has applied (`btn.click()` etc.) so the impact is minimal (though not zero), but we also use events to handle errors dispatched from components, and since we almost always need to know about these events we need to attach the listeners. Attaching listeners to hundreds or thousands of components is very expensive.
>
> 1. is a multi-faceted issue and involves substantial changes to fix entirely.
>
> * The first step is to ensure we only render things if they are visible (inactive tabs, `visible=false` should not be rendered). This _used_ to be the behaviour, but we changed it because we only store the props (i.e. state as gradio-python understands it) in memory; we do not store the state necessary to rehydrate all components exactly as they are currently. This means switching tabs resets state, for example, and we don't have sufficient data to recreate it exactly as it was. This will be taken care of at some point soon and can be done independently of gradio python and requires no breaking changes (although it isn't trivial).
> * The ideal scenario is that the config is fully dynamic and can be updated at any time, rerendering the UI. This is a more substantial change because it will introduce a number of new challenges, none of which are insurmountable. This will probably involve breaking changes, or a significant enough internal refactor that it makes sense to release it as part of 4.0, if we decide to do it.
>
> 2. This can be resolved fairly simply by changing how we listen for events. I have a few ideas that I think will work but they will need some testing. I will also look at refactoring other similar code to improve performance across the board. This _should_ result in a significant improvement to startup time for _all_ apps but especially complex ones with many components. It will also simplify some of our initialisation code, although there are tradeoffs.
>
> I'll take care of this as I'm working on the refactors over the next few weeks.
For some ideas, please have a look here: https://web.dev/content-visibility/
It is not a good idea to destroy and recreate components when they become visible; this has already been an issue with the accordion and dropdown components. Performance issues related to the view can be addressed with `content-visibility`. I hope Firefox will have it enabled by default in the next few months; all other modern browsers except Safari support this feature.
Regarding the normal-listener optimization, a good approach might be to attach listeners when a component gains focus or when the pointer is over it, and remove them when it loses focus. I have done this in the past and it is very performant.
The SD gradio config JSON file has 68,513 lines, 2,342 components, and 903 dependencies, and it is hard to optimize with the current gradio architecture.
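To make the scale discussed above concrete, a minimal synthetic sketch (hypothetical; not taken from SD web UI or any app mentioned in this thread) that builds a Blocks app with roughly two thousand components and trivial listeners can be used to compare config size and page-load time across gradio versions:

```python
# Hypothetical stress test: many components spread across tabs, each with a
# trivial listener, to exercise startup and page-load time. Adjust the counts as needed.
import gradio as gr

N_TABS = 50
N_PER_TAB = 40  # ~2000 components in total

with gr.Blocks() as demo:
    for t in range(N_TABS):
        with gr.Tab(f"Tab {t}"):
            for i in range(N_PER_TAB):
                box = gr.Textbox(label=f"field-{t}-{i}")
                box.change(lambda x: x, box, box)  # register an event listener

if __name__ == "__main__":
    demo.launch()
```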
> Any update on this issue? It's been open without any updates for over a month. Gradio 3.40 was just released and page load is so slow that the browser reports "page is not responding" and asks to kill it. In the meantime I'm stuck on gradio 3.32 since the other blocking issue I had was only resolved in >=3.36.
>
> Also note it's confirmed to have the same impact on multiple apps, so this is turning into a big thing - I don't think this can be buried any longer under "technical debt" - it's a massive regression.
>
> cc @pngwn @abidlabs
>
> 
The same problems can be observed in [SD web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (when you add multiple extensions and the UI size grows). It's kind of ridiculous for a UI framework to take something like a minute to initialize a still relatively small number of components - that's longer than the whole OS boot time...
@pngwn Please make it a top priority, and/or involve more people who could help solve this problem. It's kind of a blocker for bigger Gradio apps.
Sorry for the delay! This is literally the first thing I'll be taking care of tomorrow.
Content visibility would not help in this case.
> Sorry for the delay! This is literally the first thing I'll be taking care of tomorrow.
>
> Content visibility would not help in this case.
https://svelte.dev/repl/2a0f3556497a43f1aaf88f9219dfc6a2?version=4.2.0
Maybe event delegation can help reduce the number of event listeners. | 2023-08-21T17:00:29 |
gradio-app/gradio | 5,961 | gradio-app__gradio-5961 | [
"5829"
] | f7475ee0375985ef28e54d9bc0df828a0bd5e7d2 | diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -5,7 +5,6 @@
import json
import logging
import os
-import secrets
import shutil
import subprocess
import tempfile
@@ -15,12 +14,8 @@
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal
-import aiofiles
-import anyio
import numpy as np
import requests
-from anyio import CapacityLimiter
-from fastapi import UploadFile
from gradio_client import utils as client_utils
from PIL import Image, ImageOps, PngImagePlugin
@@ -207,44 +202,6 @@ def save_file_to_cache(file_path: str | Path, cache_dir: str) -> str:
return full_temp_file_path
-async def save_uploaded_file(
- file: UploadFile, upload_dir: str, limiter: CapacityLimiter | None = None
-) -> str:
- temp_dir = secrets.token_hex(
- 20
- ) # Since the full file is being uploaded anyways, there is no benefit to hashing the file.
- temp_dir = Path(upload_dir) / temp_dir
- temp_dir.mkdir(exist_ok=True, parents=True)
-
- sha1 = hashlib.sha1()
-
- if file.filename:
- file_name = Path(file.filename).name
- name = client_utils.strip_invalid_filename_characters(file_name)
- else:
- name = f"tmp{secrets.token_hex(5)}"
-
- full_temp_file_path = str(abspath(temp_dir / name))
-
- async with aiofiles.open(full_temp_file_path, "wb") as output_file:
- while True:
- content = await file.read(100 * 1024 * 1024)
- if not content:
- break
- sha1.update(content)
- await output_file.write(content)
-
- directory = Path(upload_dir) / sha1.hexdigest()
- directory.mkdir(exist_ok=True, parents=True)
- dest = (directory / name).resolve()
-
- await anyio.to_thread.run_sync(
- shutil.move, full_temp_file_path, dest, limiter=limiter
- )
-
- return str(dest)
-
-
def save_url_to_cache(url: str, cache_dir: str) -> str:
"""Downloads a file and makes a temporary file path for a copy if does not already
exist. Otherwise returns the path to the existing temp file."""
diff --git a/gradio/route_utils.py b/gradio/route_utils.py
--- a/gradio/route_utils.py
+++ b/gradio/route_utils.py
@@ -1,11 +1,17 @@
from __future__ import annotations
+import hashlib
import json
-from typing import TYPE_CHECKING, Optional, Union
+from tempfile import NamedTemporaryFile, _TemporaryFileWrapper
+from typing import TYPE_CHECKING, AsyncGenerator, BinaryIO, List, Optional, Tuple, Union
import fastapi
import httpx
+import multipart
from gradio_client.documentation import document, set_documentation_group
+from multipart.multipart import parse_options_header
+from starlette.datastructures import FormData, Headers, UploadFile
+from starlette.formparsers import MultiPartException, MultipartPart
from gradio import utils
from gradio.data_classes import PredictBody
@@ -264,3 +270,198 @@ def strip_url(orig_url: str) -> str:
stripped_url = parsed_url.copy_with(query=None)
stripped_url = str(stripped_url)
return stripped_url.rstrip("/")
+
+
+def _user_safe_decode(src: bytes, codec: str) -> str:
+ try:
+ return src.decode(codec)
+ except (UnicodeDecodeError, LookupError):
+ return src.decode("latin-1")
+
+
+class GradioUploadFile(UploadFile):
+ """UploadFile with a sha attribute."""
+
+ def __init__(
+ self,
+ file: BinaryIO,
+ *,
+ size: int | None = None,
+ filename: str | None = None,
+ headers: Headers | None = None,
+ ) -> None:
+ super().__init__(file, size=size, filename=filename, headers=headers)
+ self.sha = hashlib.sha1()
+
+
+class GradioMultiPartParser:
+ """Vendored from starlette.MultipartParser.
+
+ Thanks starlette!
+
+ Made the following modifications
+ - Use GradioUploadFile instead of UploadFile
+ - Use NamedTemporaryFile instead of SpooledTemporaryFile
+ - Compute hash of data as the request is streamed
+
+ """
+
+ max_file_size = 1024 * 1024
+
+ def __init__(
+ self,
+ headers: Headers,
+ stream: AsyncGenerator[bytes, None],
+ *,
+ max_files: Union[int, float] = 1000,
+ max_fields: Union[int, float] = 1000,
+ ) -> None:
+ assert (
+ multipart is not None
+ ), "The `python-multipart` library must be installed to use form parsing."
+ self.headers = headers
+ self.stream = stream
+ self.max_files = max_files
+ self.max_fields = max_fields
+ self.items: List[Tuple[str, Union[str, UploadFile]]] = []
+ self._current_files = 0
+ self._current_fields = 0
+ self._current_partial_header_name: bytes = b""
+ self._current_partial_header_value: bytes = b""
+ self._current_part = MultipartPart()
+ self._charset = ""
+ self._file_parts_to_write: List[Tuple[MultipartPart, bytes]] = []
+ self._file_parts_to_finish: List[MultipartPart] = []
+ self._files_to_close_on_error: List[_TemporaryFileWrapper] = []
+
+ def on_part_begin(self) -> None:
+ self._current_part = MultipartPart()
+
+ def on_part_data(self, data: bytes, start: int, end: int) -> None:
+ message_bytes = data[start:end]
+ if self._current_part.file is None:
+ self._current_part.data += message_bytes
+ else:
+ self._file_parts_to_write.append((self._current_part, message_bytes))
+
+ def on_part_end(self) -> None:
+ if self._current_part.file is None:
+ self.items.append(
+ (
+ self._current_part.field_name,
+ _user_safe_decode(self._current_part.data, self._charset),
+ )
+ )
+ else:
+ self._file_parts_to_finish.append(self._current_part)
+ # The file can be added to the items right now even though it's not
+ # finished yet, because it will be finished in the `parse()` method, before
+ # self.items is used in the return value.
+ self.items.append((self._current_part.field_name, self._current_part.file))
+
+ def on_header_field(self, data: bytes, start: int, end: int) -> None:
+ self._current_partial_header_name += data[start:end]
+
+ def on_header_value(self, data: bytes, start: int, end: int) -> None:
+ self._current_partial_header_value += data[start:end]
+
+ def on_header_end(self) -> None:
+ field = self._current_partial_header_name.lower()
+ if field == b"content-disposition":
+ self._current_part.content_disposition = self._current_partial_header_value
+ self._current_part.item_headers.append(
+ (field, self._current_partial_header_value)
+ )
+ self._current_partial_header_name = b""
+ self._current_partial_header_value = b""
+
+ def on_headers_finished(self) -> None:
+ disposition, options = parse_options_header(
+ self._current_part.content_disposition
+ )
+ try:
+ self._current_part.field_name = _user_safe_decode(
+ options[b"name"], self._charset
+ )
+ except KeyError as e:
+ raise MultiPartException(
+ 'The Content-Disposition header field "name" must be ' "provided."
+ ) from e
+ if b"filename" in options:
+ self._current_files += 1
+ if self._current_files > self.max_files:
+ raise MultiPartException(
+ f"Too many files. Maximum number of files is {self.max_files}."
+ )
+ filename = _user_safe_decode(options[b"filename"], self._charset)
+ tempfile = NamedTemporaryFile(delete=False)
+ self._files_to_close_on_error.append(tempfile)
+ self._current_part.file = GradioUploadFile(
+ file=tempfile, # type: ignore[arg-type]
+ size=0,
+ filename=filename,
+ headers=Headers(raw=self._current_part.item_headers),
+ )
+ else:
+ self._current_fields += 1
+ if self._current_fields > self.max_fields:
+ raise MultiPartException(
+ f"Too many fields. Maximum number of fields is {self.max_fields}."
+ )
+ self._current_part.file = None
+
+ def on_end(self) -> None:
+ pass
+
+ async def parse(self) -> FormData:
+ # Parse the Content-Type header to get the multipart boundary.
+ _, params = parse_options_header(self.headers["Content-Type"])
+ charset = params.get(b"charset", "utf-8")
+ if type(charset) == bytes:
+ charset = charset.decode("latin-1")
+ self._charset = charset
+ try:
+ boundary = params[b"boundary"]
+ except KeyError as e:
+ raise MultiPartException("Missing boundary in multipart.") from e
+
+ # Callbacks dictionary.
+ callbacks = {
+ "on_part_begin": self.on_part_begin,
+ "on_part_data": self.on_part_data,
+ "on_part_end": self.on_part_end,
+ "on_header_field": self.on_header_field,
+ "on_header_value": self.on_header_value,
+ "on_header_end": self.on_header_end,
+ "on_headers_finished": self.on_headers_finished,
+ "on_end": self.on_end,
+ }
+
+ # Create the parser.
+ parser = multipart.MultipartParser(boundary, callbacks)
+ try:
+ # Feed the parser with data from the request.
+ async for chunk in self.stream:
+ parser.write(chunk)
+ # Write file data, it needs to use await with the UploadFile methods
+ # that call the corresponding file methods *in a threadpool*,
+ # otherwise, if they were called directly in the callback methods above
+ # (regular, non-async functions), that would block the event loop in
+ # the main thread.
+ for part, data in self._file_parts_to_write:
+ assert part.file # for type checkers
+ await part.file.write(data)
+ part.file.sha.update(data) # type: ignore
+ for part in self._file_parts_to_finish:
+ assert part.file # for type checkers
+ await part.file.seek(0)
+ self._file_parts_to_write.clear()
+ self._file_parts_to_finish.clear()
+ except MultiPartException as exc:
+ # Close all the files if there was an error.
+ for file in self._files_to_close_on_error:
+ file.close()
+ raise exc
+
+ parser.finalize()
+ return FormData(self.items)
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -15,6 +15,7 @@
import os
import posixpath
import secrets
+import shutil
import tempfile
import threading
import time
@@ -24,11 +25,12 @@
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
+import anyio
import fastapi
import httpx
import markupsafe
import orjson
-from fastapi import Depends, FastAPI, File, HTTPException, UploadFile, WebSocket, status
+from fastapi import Depends, FastAPI, HTTPException, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import (
FileResponse,
@@ -38,22 +40,29 @@
)
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.templating import Jinja2Templates
+from gradio_client import utils as client_utils
from gradio_client.documentation import document, set_documentation_group
from jinja2.exceptions import TemplateNotFound
+from multipart.multipart import parse_options_header
from starlette.background import BackgroundTask
from starlette.responses import RedirectResponse, StreamingResponse
from starlette.websockets import WebSocketState
import gradio
import gradio.ranged_response as ranged_response
-from gradio import processing_utils, route_utils, utils, wasm_utils
+from gradio import route_utils, utils, wasm_utils
from gradio.context import Context
from gradio.data_classes import ComponentServerBody, PredictBody, ResetBody
from gradio.deprecation import warn_deprecation
from gradio.exceptions import Error
from gradio.oauth import attach_oauth
from gradio.queueing import Estimation, Event
-from gradio.route_utils import Request # noqa: F401
+from gradio.route_utils import ( # noqa: F401
+ GradioMultiPartParser,
+ GradioUploadFile,
+ MultiPartException,
+ Request,
+)
from gradio.state_holder import StateHolder
from gradio.utils import (
cancel_tasks,
@@ -654,16 +663,42 @@ async def get_queue_status():
return app.get_blocks()._queue.get_estimation()
@app.post("/upload", dependencies=[Depends(login_check)])
- async def upload_file(
- files: List[UploadFile] = File(...),
- ):
+ async def upload_file(request: fastapi.Request):
+ content_type_header = request.headers.get("Content-Type")
+ content_type: bytes
+ content_type, _ = parse_options_header(content_type_header)
+ if content_type != b"multipart/form-data":
+ raise HTTPException(status_code=400, detail="Invalid content type.")
+
+ try:
+ multipart_parser = GradioMultiPartParser(
+ request.headers,
+ request.stream(),
+ max_files=1000,
+ max_fields=1000,
+ )
+ form = await multipart_parser.parse()
+ except MultiPartException as exc:
+ raise HTTPException(status_code=400, detail=exc.message) from exc
+
output_files = []
- for input_file in files:
- output_files.append(
- await processing_utils.save_uploaded_file(
- input_file, app.uploaded_file_dir, app.get_blocks().limiter
- )
+ for temp_file in form.getlist("files"):
+ assert isinstance(temp_file, GradioUploadFile)
+ if temp_file.filename:
+ file_name = Path(temp_file.filename).name
+ name = client_utils.strip_invalid_filename_characters(file_name)
+ else:
+ name = f"tmp{secrets.token_hex(5)}"
+ directory = Path(app.uploaded_file_dir) / temp_file.sha.hexdigest()
+ directory.mkdir(exist_ok=True, parents=True)
+ dest = (directory / name).resolve()
+ await anyio.to_thread.run_sync(
+ shutil.move,
+ temp_file.file.name,
+ dest,
+ limiter=app.get_blocks().limiter,
)
+ output_files.append(dest)
return output_files
@app.on_event("startup")
| diff --git a/test/test_gradio_component_cli.py b/test/test_gradio_component_cli.py
--- a/test/test_gradio_component_cli.py
+++ b/test/test_gradio_component_cli.py
@@ -60,7 +60,7 @@ def test_overwrite_deletes_previous_content(tmp_path):
_create("MyGallery", tmp_path, template="Gallery", overwrite=True)
_create("MySlider", tmp_path, template="Slider", overwrite=True)
assert (tmp_path / "frontend" / "interactive" / "InteractiveSlider.svelte").exists()
- assert not (tmp_path / "frontend" / "static" / "StaticGallery.svelte").exists()
+ assert not (tmp_path / "frontend" / "shared" / "Gallery.svelte").exists()
def test_do_not_replace_class_name_in_import_statement(tmp_path):
| Investigate UploadFile Optimization
- [ ] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
The upload route uses `UploadFile`, which under the hood uses a `SpooledTemporaryFile` [link](https://fastapi.tiangolo.com/tutorial/request-files/?h=upload#__tabbed_3_3). This will write the file to disk if it's larger than a certain size. We then iterate over the file and write it to a separate location.
So we're writing the file twice. I wonder if we can just copy the file to a new location? Or iterate through the bytes of the file without having to write it to disk.
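For illustration, here is a minimal sketch of the "iterate through the bytes" idea with assumed names; it is not what the PR ends up doing (the patch above vendors a multipart parser instead), it only shows the single-pass write-and-hash pattern:

```python
# Sketch: stream an UploadFile to its cache location in a single pass,
# computing the content hash while writing. Names here are illustrative.
import hashlib
from pathlib import Path

from fastapi import UploadFile


async def stream_to_cache(file: UploadFile, upload_dir: str) -> str:
    sha1 = hashlib.sha1()
    tmp_path = Path(upload_dir) / "incoming.tmp"
    with open(tmp_path, "wb") as out:
        while True:
            chunk = await file.read(1024 * 1024)
            if not chunk:
                break
            sha1.update(chunk)
            out.write(chunk)
    # Key the final directory by content hash so duplicate uploads collapse.
    dest_dir = Path(upload_dir) / sha1.hexdigest()
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest = dest_dir / (file.filename or "upload")
    tmp_path.rename(dest)
    return str(dest)
```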
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Additional context**
Add any other context or screenshots about the feature request here.
| I think a hash dict could be an option. Good issue, thought about this recently. | 2023-10-17T19:39:38 |
gradio-app/gradio | 6,234 | gradio-app__gradio-6234 | [
"6202"
] | d7a1a6559005e6a1e0be03a3bd5212d1bc60d1ee | diff --git a/demo/audio_component_events/run.py b/demo/audio_component_events/run.py
new file mode 100644
--- /dev/null
+++ b/demo/audio_component_events/run.py
@@ -0,0 +1,21 @@
+import gradio as gr
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ input_video = gr.Audio(label="Input Audio", sources=["upload", "microphone"])
+ with gr.Column():
+ output_video = gr.Audio(label="Output Audio", sources=["upload", "microphone"])
+ with gr.Column():
+ num_change = gr.Number(label="# Change Events", value=0)
+ num_load = gr.Number(label="# Upload Events", value=0)
+ num_play = gr.Number(label="# Play Events", value=0)
+ num_pause = gr.Number(label="# Pause Events", value=0)
+ input_video.upload(lambda s, n: (s, n + 1), [input_video, num_load], [output_video, num_load])
+ input_video.change(lambda n: n + 1, num_change, num_change)
+ input_video.play(lambda n: n + 1, num_play, num_play)
+ input_video.pause(lambda n: n + 1, num_pause, num_pause)
+ input_video.change(lambda n: n + 1, num_change, num_change)
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/demo/video_component_events/run.py b/demo/video_component_events/run.py
new file mode 100644
--- /dev/null
+++ b/demo/video_component_events/run.py
@@ -0,0 +1,21 @@
+import gradio as gr
+
+with gr.Blocks() as demo:
+ with gr.Row():
+ with gr.Column():
+ input_video = gr.Video(label="Input Video")
+ with gr.Column():
+ output_video = gr.Video(label="Output Video")
+ with gr.Column():
+ num_change = gr.Number(label="# Change Events", value=0)
+ num_load = gr.Number(label="# Upload Events", value=0)
+ num_play = gr.Number(label="# Play Events", value=0)
+ num_pause = gr.Number(label="# Pause Events", value=0)
+ input_video.upload(lambda s, n: (s, n + 1), [input_video, num_load], [output_video, num_load])
+ input_video.change(lambda n: n + 1, num_change, num_change)
+ input_video.play(lambda n: n + 1, num_play, num_play)
+ input_video.pause(lambda n: n + 1, num_pause, num_pause)
+ input_video.change(lambda n: n + 1, num_change, num_change)
+
+if __name__ == "__main__":
+ demo.launch()
\ No newline at end of file
diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py
--- a/gradio/processing_utils.py
+++ b/gradio/processing_utils.py
@@ -258,6 +258,7 @@ def move_resource_to_block_cache(url_or_file_path: str | Path, block: Component)
temp_file_path = save_url_to_cache(
url_or_file_path, cache_dir=block.GRADIO_CACHE
)
+
block.temp_files.add(temp_file_path)
else:
url_or_file_path = str(abspath(url_or_file_path))
@@ -265,9 +266,9 @@ def move_resource_to_block_cache(url_or_file_path: str | Path, block: Component)
temp_file_path = save_file_to_cache(
url_or_file_path, cache_dir=block.GRADIO_CACHE
)
- block.temp_files.add(temp_file_path)
else:
temp_file_path = url_or_file_path
+ block.temp_files.add(temp_file_path)
return temp_file_path
| diff --git a/js/app/test/audio_component_events.spec.ts b/js/app/test/audio_component_events.spec.ts
new file mode 100644
--- /dev/null
+++ b/js/app/test/audio_component_events.spec.ts
@@ -0,0 +1,59 @@
+import { test, expect, drag_and_drop_file } from "@gradio/tootils";
+
+test("Audio click-to-upload uploads audio successfuly.", async ({ page }) => {
+ await page
+ .getByRole("button", { name: "Drop Audio Here - or - Click to Upload" })
+ .click();
+ const uploader = await page.locator("input[type=file]");
+ await Promise.all([
+ uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
+ page.waitForResponse("**/upload")
+ ]);
+
+ await expect(page.getByLabel("# Change Events")).toHaveValue("1");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+
+ await page.getByLabel("Clear").click();
+ await expect(page.getByLabel("# Change Events")).toHaveValue("2");
+ await page
+ .getByRole("button", { name: "Drop Audio Here - or - Click to Upload" })
+ .click();
+
+ await Promise.all([
+ uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
+ page.waitForResponse("**/upload")
+ ]);
+
+ await expect(page.getByLabel("# Change Events")).toHaveValue("3");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("2");
+});
+
+test("Audio drag-and-drop uploads a file to the server correctly.", async ({
+ page
+}) => {
+ await Promise.all([
+ drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "../../test/test_files/audio_sample.wav",
+ "audio_sample.wav",
+ "audio/wav"
+ ),
+ page.waitForResponse("**/upload")
+ ]);
+ await expect(page.getByLabel("# Change Events")).toHaveValue("1");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+});
+
+test("Audio drag-and-drop displays a warning when the file is of the wrong mime type.", async ({
+ page
+}) => {
+ await drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "../../test/test_files/audio_sample.wav",
+ "audio_sample.wav"
+ );
+ const toast = page.getByTestId("toast-body");
+ expect(toast).toContainText("warning");
+});
diff --git a/js/app/test/files/file_test.ogg b/js/app/test/files/file_test.ogg
new file mode 100644
Binary files /dev/null and b/js/app/test/files/file_test.ogg differ
diff --git a/js/app/test/video_component_events.spec.ts b/js/app/test/video_component_events.spec.ts
new file mode 100644
--- /dev/null
+++ b/js/app/test/video_component_events.spec.ts
@@ -0,0 +1,69 @@
+import { test, expect, drag_and_drop_file } from "@gradio/tootils";
+
+test("Video click-to-upload uploads video successfuly. Clear, play, and pause buttons dispatch events correctly.", async ({
+ page
+}) => {
+ await page
+ .getByRole("button", { name: "Drop Video Here - or - Click to Upload" })
+ .click();
+ const uploader = await page.locator("input[type=file]");
+ await Promise.all([
+ uploader.setInputFiles(["./test/files/file_test.ogg"]),
+ page.waitForResponse("**/upload")
+ ]);
+
+ await expect(page.getByLabel("# Change Events")).toHaveValue("1");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+
+ await page.getByLabel("play-pause-replay-button").nth(0).click();
+ await page.getByLabel("play-pause-replay-button").nth(0).click();
+ await expect(page.getByLabel("# Play Events")).toHaveValue("1");
+ await expect(page.getByLabel("# Pause Events")).toHaveValue("1");
+
+ await page.getByLabel("Clear").click();
+ await expect(page.getByLabel("# Change Events")).toHaveValue("2");
+ await page
+ .getByRole("button", { name: "Drop Video Here - or - Click to Upload" })
+ .click();
+
+ await Promise.all([
+ uploader.setInputFiles(["./test/files/file_test.ogg"]),
+ page.waitForResponse("**/upload")
+ ]);
+
+ await expect(page.getByLabel("# Change Events")).toHaveValue("3");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("2");
+
+ await page.getByLabel("play-pause-replay-button").first().click();
+ await page.getByLabel("play-pause-replay-button").first().click();
+ await expect(page.getByLabel("# Play Events")).toHaveValue("2");
+ await expect(page.getByLabel("# Pause Events")).toHaveValue("2");
+});
+
+test("Video drag-and-drop uploads a file to the server correctly.", async ({
+ page
+}) => {
+ await drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "./test/files/file_test.ogg",
+ "file_test.ogg",
+ "video/*"
+ );
+ await page.waitForResponse("**/upload");
+ await expect(page.getByLabel("# Change Events")).toHaveValue("1");
+ await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+});
+
+test("Video drag-and-drop displays a warning when the file is of the wrong mime type.", async ({
+ page
+}) => {
+ await drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "./test/files/file_test.ogg",
+ "file_test.ogg"
+ );
+ const toast = page.getByTestId("toast-body");
+ expect(toast).toContainText("warning");
+});
| [Gradio 4] Drag and drop is not working for `gr.Video` and `gr.Audio`
### Describe the bug
It's possible to click the area to upload a video, but drag & drop doesn't seem to work.
https://github.com/gradio-app/gradio/assets/25161192/a117d8f5-dadc-4970-882c-074a04d50625
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```py
import gradio as gr
gr.Interface(fn=lambda x: x, inputs="video", outputs="video").launch()
```
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==4.0.2
```
### Severity
I can work around it
Drag and drop doesn't work for `gr.Audio` either. | 2023-11-01T19:00:18 |
gradio-app/gradio | 6,254 | gradio-app__gradio-6254 | [
"6207"
] | 0bafdcb550feefceb984e910c01c15d9d398ee1b | diff --git a/gradio/templates.py b/gradio/templates.py
--- a/gradio/templates.py
+++ b/gradio/templates.py
@@ -326,7 +326,7 @@ def __init__(
class Microphone(components.Audio):
"""
- Sets: source="microphone"
+ Sets: sources=["microphone"]
"""
is_template = True
| diff --git a/js/app/test/audio_debugger.spec.ts b/js/app/test/audio_debugger.spec.ts
new file mode 100644
--- /dev/null
+++ b/js/app/test/audio_debugger.spec.ts
@@ -0,0 +1,39 @@
+import { test } from "@gradio/tootils";
+
+// we cannot currently test the waveform canvas with playwright (https://github.com/microsoft/playwright/issues/23964)
+// so this test covers the interactive elements around the waveform canvas
+
+test("audio waveform", async ({ page }) => {
+ await page.getByRole("button", { name: "Interface" }).click();
+ await page.getByRole("button", { name: "cantina.wav" }).click();
+ await page
+ .getByTestId("waveform-x")
+ .getByLabel("Adjust playback speed to 1.5x")
+ .click();
+ await page.getByLabel("Adjust playback speed to 2x").click();
+
+ await page
+ .getByTestId("waveform-x")
+ .getByLabel("Skip forward by 0.15 seconds")
+ .click();
+ await page
+ .getByTestId("waveform-x")
+ .getByLabel("Skip backwards by 0.15 seconds")
+ .click();
+ await page.getByLabel("Trim audio to selection").click();
+ await page.getByRole("button", { name: "Trim" }).click();
+ await page.getByLabel("Reset audio").click();
+ await page.getByRole("button", { name: "Submit" }).click();
+ await page
+ .getByTestId("waveform-output")
+ .getByLabel("Adjust playback speed to 1.5x")
+ .click();
+ await page
+ .getByTestId("waveform-output")
+ .getByLabel("Skip backwards by 0.15 seconds")
+ .click();
+ await page
+ .getByTestId("waveform-output")
+ .getByLabel("Skip forward by 0.15 seconds")
+ .click();
+});
diff --git a/js/audio/audio.test.ts b/js/audio/audio.test.ts
--- a/js/audio/audio.test.ts
+++ b/js/audio/audio.test.ts
@@ -1,6 +1,6 @@
-import { test, describe, assert, afterEach } from "vitest";
+import { test, describe, assert, afterEach, vi } from "vitest";
import { cleanup, render } from "@gradio/tootils";
-import Audio from "./Index.svelte";
+import Audio from "./";
import type { LoadingStatus } from "@gradio/statustracker";
import { setupi18n } from "../app/src/i18n";
import ResizeObserver from "resize-observer-polyfill";
@@ -18,6 +18,18 @@ const loading_status: LoadingStatus = {
show_progress: "full"
};
+const default_values = {
+ loading_status,
+ label: "music",
+ value: {
+ url: "https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3",
+ path: "https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3",
+ orig_name: "SoundHelix-Song-1.mp3"
+ },
+ root: "",
+ proxy_url: "",
+ show_label: true
+};
describe("Audio", () => {
setupi18n();
@@ -25,18 +37,86 @@ describe("Audio", () => {
test("renders audio component", async () => {
const { getAllByTestId } = await render(Audio, {
- loading_status,
- label: "music",
- value: {
- url: "https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3",
- path: "https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3",
- orig_name: "SoundHelix-Song-1.mp3"
- },
- root: "",
- proxy_url: "",
- theme_mode: "dark"
+ ...default_values,
+ interactive: true,
+ sources: ["microphone", "upload"],
+ pending: false,
+ streaming: false
});
assert.exists(getAllByTestId("waveform-music"));
});
+
+ test("renders audio component with audio controls", async () => {
+ const { getAllByTestId, getAllByLabelText, getAllByText } = await render(
+ Audio,
+ {
+ ...default_values,
+ streaming: false,
+ pending: false,
+ sources: ["microphone"],
+ interactive: true
+ }
+ );
+
+ assert.exists(getAllByTestId("waveform-controls"));
+
+ assert.exists(getAllByLabelText("Trim audio to selection"));
+ assert.exists(getAllByLabelText("Reset audio"));
+ assert.exists(getAllByText("0:00"));
+ assert.exists(getAllByLabelText("audio.play"));
+ assert.exists(getAllByLabelText("Adjust volume"));
+ assert.exists(getAllByLabelText("Adjust playback speed to 1.5x"));
+ assert.exists(getAllByLabelText("Skip forward by 5 seconds"));
+ });
+
+ test("does not render with audio editing controls when not interactive", async () => {
+ const { getAllByTestId, queryByLabelText } = await render(Audio, {
+ ...default_values,
+ streaming: false,
+ pending: false,
+ sources: ["microphone"],
+ interactive: false
+ });
+
+ assert.exists(getAllByTestId("waveform-controls"));
+ assert.notExists(queryByLabelText("Trim audio to selection"));
+ assert.notExists(queryByLabelText("Reset audio"));
+ });
+
+ test("renders source selection with correct selected source", async () => {
+ const { getByTestId, getByLabelText } = await render(Audio, {
+ ...default_values,
+ streaming: false,
+ pending: false,
+ sources: ["microphone", "upload"],
+ interactive: true
+ });
+
+ assert.exists(getByTestId("source-select"));
+ assert.lengthOf(getByTestId("source-select").children, 2);
+ assert.exists(getByLabelText("Record audio"));
+
+ assert.equal(
+ getByLabelText("Record audio").classList.contains("selected"),
+ true
+ );
+
+ assert.equal(
+ getByLabelText("Upload file").classList.contains("selected"),
+ false
+ );
+ });
+
+ test("does not render source selection when upload is only source", async () => {
+ const { queryByTestId } = await render(Audio, {
+ ...default_values,
+ streaming: false,
+ pending: false,
+ sources: ["upload"],
+ interactive: true
+ });
+
+ assert.notExists(queryByTestId("source-select"));
+ });
});
| [Gradio 4] Adjust waveforms depending on the sound volume range in `gr.Audio`
- [x] I have searched to see if a similar issue already exists.
For loud audio input, the waveform appears like this, and is almost meaningless. So, it would be nice if the display scale of the waveform could be changed according to the sound volume of the input audio.

| 2023-11-02T15:24:55 |
|
gradio-app/gradio | 6,277 | gradio-app__gradio-6277 | [
"6276"
] | e3ede2ff7d4a36fb21bb0b146b8d5ad239c0e086 | diff --git a/demo/gallery_selections/run.py b/demo/gallery_selections/run.py
--- a/demo/gallery_selections/run.py
+++ b/demo/gallery_selections/run.py
@@ -3,7 +3,10 @@
with gr.Blocks() as demo:
imgs = gr.State()
- gallery = gr.Gallery()
+ gallery = gr.Gallery(allow_preview=False)
+
+ def deselect_images():
+ return gr.Gallery(selected_index=None)
def generate_images():
images = []
@@ -19,6 +22,9 @@ def generate_images():
with gr.Row():
selected = gr.Number(show_label=False)
darken_btn = gr.Button("Darken selected")
+ deselect_button = gr.Button("Deselect")
+
+ deselect_button.click(deselect_images, None, gallery)
def get_select_index(evt: gr.SelectData):
return evt.index
| `gr.Gallery` `selected_index` doesn't update as expected
### Describe the bug
**Issue**: A user wants to programmatically unselect a selected gallery item in the Gradio UI and remove the orange selection visual.
**Conversation**:
- Suggested a solution using the function:
```python
def deselect():
    return gr.Gallery(selected_index=None)
```
- Confirmed the solution works but noticed a new issue. After using the `deselect` function, subsequent selections don't show the orange visual, even though the selection functionality is still operational.
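Wired up end to end, the suggested workaround looks roughly like the demo change in the patch above (a sketch, reusing its `allow_preview=False` gallery):

```python
import gradio as gr

with gr.Blocks() as demo:
    gallery = gr.Gallery(allow_preview=False)
    deselect_button = gr.Button("Deselect")

    def deselect_images():
        # Returning a Gallery with selected_index=None clears the current selection
        return gr.Gallery(selected_index=None)

    deselect_button.click(deselect_images, None, gallery)

demo.launch()
```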
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
^
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
^
```
### Severity
I can work around it
| 2023-11-03T12:55:24 |
||
gradio-app/gradio | 6,290 | gradio-app__gradio-6290 | [
"6252"
] | 36f1972ab16d6953b898754634c420daa53c5e33 | diff --git a/demo/dataframe_component/run.py b/demo/dataframe_component/run.py
--- a/demo/dataframe_component/run.py
+++ b/demo/dataframe_component/run.py
@@ -1,6 +1,6 @@
-import gradio as gr
+import gradio as gr
with gr.Blocks() as demo:
gr.Dataframe(interactive=True)
-demo.launch()
\ No newline at end of file
+demo.launch()
| Gradio.Dataframe does no longer properly pass values updated in the GUI when used as input from version 4
### Describe the bug
When running the [filter records example](https://www.gradio.app/docs/dataframe) code in `3.50.2`, the records are filtered based on the gender entries a user made on the webpage and on the selected gender in the dropdown. These records are displayed in the output dataframe as expected.
However, in gradio version `4.0.2` the values that are entered on the webpage are not properly passed to the backend. Thus, no output is visible in the output dataframe.
Changing `filter_records(...)` to include a print statement shows that the values from the GUI are not reaching the backend:
```
def filter_records(records, gender):
    print(records)
    return records[records["gender"] == gender]
```
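For reference, a minimal repro along the lines of the docs example (the component arguments here are approximated, so the exact docs code may differ slightly):

```python
import gradio as gr


def filter_records(records, gender):
    print(records)  # per the report above, the values edited in the UI do not show up here in 4.0.2
    return records[records["gender"] == gender]


demo = gr.Interface(
    filter_records,
    [
        gr.Dataframe(
            headers=["name", "age", "gender"],
            datatype=["str", "number", "str"],
            row_count=5,
        ),
        gr.Dropdown(["M", "F"]),
    ],
    "dataframe",
)

demo.launch()
```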
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
1. Install gradio version `4.0.2`
2. Add the code from the [filter records example](https://www.gradio.app/docs/dataframe) to a file `df_example.py`
3. Run `python df_example.py`
4. Optionally add the print-statement as mentioned before
5. Enter values in the interface and press `Submit`
6. Observe that the output dataframe stays empty
### Screenshot
_No response_
### Logs
```shell
No (error) logs given
```
### System Info
```shell
gradio environment
Gradio Environment Information:
------------------------------
Operating System: Windows
gradio version: 4.0.2
gradio_client version: 0.7.0
------------------------------------------------
gradio dependencies in your environment:
aiofiles: 23.2.1
altair: 5.1.2
fastapi: 0.104.1
ffmpy: 0.3.1
gradio-client==0.7.0 is not installed.
httpx: 0.25.0
huggingface-hub: 0.18.0
importlib-resources: 6.1.0
jinja2: 3.1.2
markupsafe: 2.1.1
matplotlib: 3.7.0
numpy: 1.23.5
orjson: 3.9.10
packaging: 22.0
pandas: 1.5.3
pillow: 9.4.0
pydantic: 2.4.2
pydub: 0.25.1
python-multipart: 0.0.6
pyyaml: 6.0
requests: 2.28.1
semantic-version: 2.10.0
tomlkit==0.12.0 is not installed.
typer: 0.9.0
typing-extensions: 4.8.0
uvicorn: 0.23.2
websockets: 11.0.3
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.
gradio_client dependencies in your environment:
fsspec: 2023.10.0
httpx: 0.25.0
huggingface-hub: 0.18.0
packaging: 22.0
requests: 2.28.1
typing-extensions: 4.8.0
websockets: 11.0.3
```
### Severity
I can work around it
| Thanks for creating the issue @Rjdrenth, taking a look | 2023-11-03T22:08:33 |
|
gradio-app/gradio | 6,307 | gradio-app__gradio-6307 | [
"6218"
] | 25e380078a3d97bf4f2393dcb8d488da26e21ebb | diff --git a/gradio/route_utils.py b/gradio/route_utils.py
--- a/gradio/route_utils.py
+++ b/gradio/route_utils.py
@@ -2,6 +2,8 @@
import hashlib
import json
+from collections import deque
+from dataclasses import dataclass as python_dataclass
from tempfile import NamedTemporaryFile, _TemporaryFileWrapper
from typing import TYPE_CHECKING, AsyncGenerator, BinaryIO, List, Optional, Tuple, Union
@@ -294,6 +296,44 @@ def __init__(
self.sha = hashlib.sha1()
+@python_dataclass(frozen=True)
+class FileUploadProgressUnit:
+ filename: str
+ chunk_size: int
+ is_done: bool
+
+
+class FileUploadProgress:
+ def __init__(self) -> None:
+ self._statuses: dict[str, deque[FileUploadProgressUnit]] = {}
+
+ def track(self, upload_id: str):
+ if upload_id not in self._statuses:
+ self._statuses[upload_id] = deque()
+
+ def update(self, upload_id: str, filename: str, message_bytes: bytes):
+ if upload_id not in self._statuses:
+ self._statuses[upload_id] = deque()
+ self._statuses[upload_id].append(
+ FileUploadProgressUnit(filename, len(message_bytes), is_done=False)
+ )
+
+ def set_done(self, upload_id: str):
+ self._statuses[upload_id].append(FileUploadProgressUnit("", 0, is_done=True))
+
+ def stop_tracking(self, upload_id: str):
+ if upload_id in self._statuses:
+ del self._statuses[upload_id]
+
+ def status(self, upload_id: str) -> deque[FileUploadProgressUnit]:
+ if upload_id not in self._statuses:
+ return deque()
+ return self._statuses[upload_id]
+
+ def is_tracked(self, upload_id: str):
+ return upload_id in self._statuses
+
+
class GradioMultiPartParser:
"""Vendored from starlette.MultipartParser.
@@ -315,6 +355,8 @@ def __init__(
*,
max_files: Union[int, float] = 1000,
max_fields: Union[int, float] = 1000,
+ upload_id: str | None = None,
+ upload_progress: FileUploadProgress | None = None,
) -> None:
assert (
multipart is not None
@@ -324,6 +366,8 @@ def __init__(
self.max_files = max_files
self.max_fields = max_fields
self.items: List[Tuple[str, Union[str, UploadFile]]] = []
+ self.upload_id = upload_id
+ self.upload_progress = upload_progress
self._current_files = 0
self._current_fields = 0
self._current_partial_header_name: bytes = b""
@@ -339,6 +383,10 @@ def on_part_begin(self) -> None:
def on_part_data(self, data: bytes, start: int, end: int) -> None:
message_bytes = data[start:end]
+ if self.upload_progress is not None:
+ self.upload_progress.update(
+ self.upload_id, self._current_part.file.filename, message_bytes # type: ignore
+ )
if self._current_part.file is None:
self._current_part.data += message_bytes
else:
@@ -464,4 +512,6 @@ async def parse(self) -> FormData:
raise exc
parser.finalize()
+ if self.upload_progress is not None:
+ self.upload_progress.set_done(self.upload_id) # type: ignore
return FormData(self.items)
diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -56,6 +56,7 @@
from gradio.oauth import attach_oauth
from gradio.queueing import Estimation, Event
from gradio.route_utils import ( # noqa: F401
+ FileUploadProgress,
GradioMultiPartParser,
GradioUploadFile,
MultiPartException,
@@ -121,6 +122,9 @@ def move_uploaded_files_to_cache(files: list[str], destinations: list[str]) -> N
shutil.move(file, dest)
+file_upload_statuses = FileUploadProgress()
+
+
class App(FastAPI):
"""
FastAPI App Wrapper
@@ -681,8 +685,57 @@ def component_server(body: ComponentServerBody):
async def get_queue_status():
return app.get_blocks()._queue.get_estimation()
+ @app.get("/upload_progress")
+ def get_upload_progress(upload_id: str, request: fastapi.Request):
+ async def sse_stream(request: fastapi.Request):
+ last_heartbeat = time.perf_counter()
+ is_done = False
+ while True:
+ if await request.is_disconnected():
+ file_upload_statuses.stop_tracking(upload_id)
+ return
+ if is_done:
+ file_upload_statuses.stop_tracking(upload_id)
+ return
+
+ heartbeat_rate = 15
+ check_rate = 0.05
+ message = None
+ try:
+ if update := file_upload_statuses.status(upload_id).popleft():
+ if update.is_done:
+ message = {"msg": "done"}
+ is_done = True
+ else:
+ message = {
+ "msg": "update",
+ "orig_name": update.filename,
+ "chunk_size": update.chunk_size,
+ }
+ else:
+ await asyncio.sleep(check_rate)
+ if time.perf_counter() - last_heartbeat > heartbeat_rate:
+ message = {"msg": "heartbeat"}
+ last_heartbeat = time.perf_counter()
+ if message:
+ yield f"data: {json.dumps(message)}\n\n"
+ except IndexError:
+ if not file_upload_statuses.is_tracked(upload_id):
+ return
+ # pop from empty queue
+ continue
+
+ return StreamingResponse(
+ sse_stream(request),
+ media_type="text/event-stream",
+ )
+
@app.post("/upload", dependencies=[Depends(login_check)])
- async def upload_file(request: fastapi.Request, bg_tasks: BackgroundTasks):
+ async def upload_file(
+ request: fastapi.Request,
+ bg_tasks: BackgroundTasks,
+ upload_id: Optional[str] = None,
+ ):
content_type_header = request.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
@@ -690,11 +743,15 @@ async def upload_file(request: fastapi.Request, bg_tasks: BackgroundTasks):
raise HTTPException(status_code=400, detail="Invalid content type.")
try:
+ if upload_id:
+ file_upload_statuses.track(upload_id)
multipart_parser = GradioMultiPartParser(
request.headers,
request.stream(),
max_files=1000,
max_fields=1000,
+ upload_id=upload_id if upload_id else None,
+ upload_progress=file_upload_statuses if upload_id else None,
)
form = await multipart_parser.parse()
except MultiPartException as exc:
| diff --git a/js/app/test/audio_component_events.spec.ts b/js/app/test/audio_component_events.spec.ts
--- a/js/app/test/audio_component_events.spec.ts
+++ b/js/app/test/audio_component_events.spec.ts
@@ -7,7 +7,7 @@ test("Audio click-to-upload uploads audio successfuly.", async ({ page }) => {
const uploader = await page.locator("input[type=file]");
await Promise.all([
uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
- page.waitForResponse("**/upload")
+ page.waitForResponse("**/upload?*")
]);
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
@@ -21,7 +21,7 @@ test("Audio click-to-upload uploads audio successfuly.", async ({ page }) => {
await Promise.all([
uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
- page.waitForResponse("**/upload")
+ page.waitForResponse("**/upload?*")
]);
await expect(page.getByLabel("# Change Events")).toHaveValue("3");
@@ -39,7 +39,7 @@ test("Audio drag-and-drop uploads a file to the server correctly.", async ({
"audio_sample.wav",
"audio/wav"
),
- page.waitForResponse("**/upload")
+ page.waitForResponse("**/upload?*")
]);
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
diff --git a/js/app/test/video_component_events.spec.ts b/js/app/test/video_component_events.spec.ts
--- a/js/app/test/video_component_events.spec.ts
+++ b/js/app/test/video_component_events.spec.ts
@@ -9,7 +9,7 @@ test("Video click-to-upload uploads video successfuly. Clear, play, and pause bu
const uploader = await page.locator("input[type=file]");
await Promise.all([
uploader.setInputFiles(["./test/files/file_test.ogg"]),
- page.waitForResponse("**/upload")
+ page.waitForResponse("**/upload?*?*")
]);
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
@@ -28,7 +28,7 @@ test("Video click-to-upload uploads video successfuly. Clear, play, and pause bu
await Promise.all([
uploader.setInputFiles(["./test/files/file_test.ogg"]),
- page.waitForResponse("**/upload")
+ page.waitForResponse("**/upload?*")
]);
await expect(page.getByLabel("# Change Events")).toHaveValue("3");
@@ -50,7 +50,7 @@ test("Video drag-and-drop uploads a file to the server correctly.", async ({
"file_test.ogg",
"video/*"
);
- await page.waitForResponse("**/upload");
+ await page.waitForResponse("**/upload?*");
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
});
| Improve /upload experience while file is still loading (?)
From the 4.0 tracking issue.
| 2023-11-06T19:25:52 |
|
gradio-app/gradio | 6,660 | gradio-app__gradio-6660 | [
"6045"
] | fce80ac804dce1b1b22a8b5c575a35cf0356b82a | diff --git a/gradio/cli/commands/reload.py b/gradio/cli/commands/reload.py
--- a/gradio/cli/commands/reload.py
+++ b/gradio/cli/commands/reload.py
@@ -35,14 +35,14 @@ def _setup_config(
app_text = original_path.read_text(encoding=encoding)
patterns = [
- f"with gr\\.Blocks\\(\\) as {demo_name}",
+ f"with gr\\.Blocks\\(.*\\) as {demo_name}",
f"{demo_name} = gr\\.Blocks",
f"{demo_name} = gr\\.Interface",
f"{demo_name} = gr\\.ChatInterface",
f"{demo_name} = gr\\.TabbedInterface",
]
- if not any(re.search(p, app_text) for p in patterns):
+ if not any(re.search(p, app_text, flags=re.DOTALL) for p in patterns):
print(
f"\n[bold red]Warning[/]: Cannot statically find a gradio demo called {demo_name}. "
"Reload work may fail."
| If gradio.Blocks() is given any arguments, produces "Warning: Cannot statically find a gradio demo called demo. Reload work may fail."
### Describe the bug
According to Gradio documentation, `gr.Blocks()` can take arguments such as `analytics_enabled=False`. However, if `gr.Blocks()` is given any arguments in `with gr.Blocks() as demo:`, Gradio produces a warning:
````
Warning: Cannot statically find a gradio demo called demo. Reload work may fail.
````
This can be silenced by adding a commented line as follows
````
# with gr.Blocks() as demo:
````
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
````
import gradio as gr
with gr.Blocks(analytics_enabled=False) as demo:
    gr.Button()
if __name__ == "__main__":
    demo.launch()
````
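For illustration, the patched pattern above matches a multi-line `gr.Blocks(...)` signature because `.*` combined with `re.DOTALL` spans newlines; a quick self-contained check:

```python
import re

app_text = """
import gradio as gr

with gr.Blocks(
    analytics_enabled=False,
) as demo:
    gr.Button()
"""

demo_name = "demo"
new_pattern = f"with gr\\.Blocks\\(.*\\) as {demo_name}"
old_pattern = f"with gr\\.Blocks\\(\\) as {demo_name}"

print(re.search(new_pattern, app_text, flags=re.DOTALL) is not None)  # True
print(re.search(old_pattern, app_text) is not None)  # False: the old pattern required a literal ()
```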
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
Gradio Environment Information:
------------------------------
Operating System: Darwin
gradio version: 3.50.2
gradio_client version: 0.6.1
------------------------------------------------
gradio dependencies in your environment:
aiofiles: 23.2.1
altair: 5.1.2
fastapi: 0.104.0
ffmpy: 0.3.1
gradio-client==0.6.1 is not installed.
httpx: 0.25.0
huggingface-hub: 0.17.3
importlib-resources: 6.1.0
jinja2: 3.1.2
markupsafe: 2.1.3
matplotlib: 3.8.0
numpy: 1.26.1
orjson: 3.9.9
packaging: 23.2
pandas: 2.1.1
pillow: 9.5.0
pydantic: 2.4.2
pydub: 0.25.1
python-multipart: 0.0.6
pyyaml: 6.0.1
requests: 2.31.0
semantic-version: 2.10.0
typing-extensions: 4.8.0
uvicorn: 0.23.2
websockets: 11.0.3
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.
gradio_client dependencies in your environment:
fsspec: 2023.9.2
httpx: 0.25.0
huggingface-hub: 0.17.3
packaging: 23.2
requests: 2.31.0
typing-extensions: 4.8.0
websockets: 11.0.3
```
### Severity
I can work around it
| Interesting.. this only happens if you run your file in reload mode, right? (e.g. `gradio app.py`, but no warning if you do `python app.py`)?
That's correct.
Still an issue
Still an issue, the team can't resolve this basic issue sad | 2023-12-04T22:50:55 |
|
gradio-app/gradio | 6,738 | gradio-app__gradio-6738 | [
"6736"
] | bdf81fead86e1d5a29e6b036f1fff677f6480e6b | diff --git a/gradio/utils.py b/gradio/utils.py
--- a/gradio/utils.py
+++ b/gradio/utils.py
@@ -170,6 +170,8 @@ def iter_py_files() -> Iterator[Path]:
for reload_dir in reload_dirs:
for path in list(reload_dir.rglob("*.py")):
yield path.resolve()
+ for path in list(reload_dir.rglob("*.css")):
+ yield path.resolve()
module = None
reload_dirs = [Path(dir_) for dir_ in reloader.watch_dirs]
| Writing custom css is frustrating
- [x] I have searched to see if a similar issue already exists.
Writing custom CSS is incredibly frustrating, to the point where I'm not sure how people have the patience to even bother.
We need to improve this somehow. I have a few ideas that I'll add shortly.
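For reference, the workflow this targets today: custom CSS is passed to `gr.Blocks` via the `css` argument (a raw CSS string, or a path to a stylesheet), and the `utils.py` change above makes `gradio app.py` also reload when a watched `.css` file changes. A minimal sketch; the selector and class names are illustrative:

```python
import gradio as gr

CSS = """
.notes-box textarea { font-size: 18px; }
"""

with gr.Blocks(css=CSS) as demo:  # css="style.css" (a file path) also works
    gr.Textbox(label="Notes", elem_classes=["notes-box"])

if __name__ == "__main__":
    demo.launch()
```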
| 2023-12-11T00:35:21 |
||
gradio-app/gradio | 6,780 | gradio-app__gradio-6780 | [
"6659"
] | a807ede818e0690949aca41020e75a96f0110ece | diff --git a/gradio/cli/commands/components/_create_utils.py b/gradio/cli/commands/components/_create_utils.py
--- a/gradio/cli/commands/components/_create_utils.py
+++ b/gradio/cli/commands/components/_create_utils.py
@@ -13,8 +13,8 @@
def _in_test_dir():
- """Check if the current working directory ends with gradio/js/gradio-preview/test."""
- return Path.cwd().parts[-4:] == ("gradio", "js", "gradio-preview", "test")
+ """Check if the current working directory ends with gradio/js/preview/test."""
+ return Path.cwd().parts[-4:] == ("gradio", "js", "preview", "test")
default_demo_code = """
diff --git a/gradio/cli/commands/components/build.py b/gradio/cli/commands/components/build.py
--- a/gradio/cli/commands/components/build.py
+++ b/gradio/cli/commands/components/build.py
@@ -1,6 +1,5 @@
import shutil
import subprocess
-import sys
from pathlib import Path
import semantic_version
@@ -83,7 +82,7 @@ def _build(
else:
live.update(":white_check_mark: Build succeeded!")
- cmds = [sys.executable, "-m", "build", str(name)]
+ cmds = [shutil.which("python"), "-m", "build", str(name)]
live.update(f":construction_worker: Building... [grey37]({' '.join(cmds)})[/]")
pipe = subprocess.run(cmds, capture_output=True, text=True)
if pipe.returncode != 0:
diff --git a/gradio/cli/commands/components/publish.py b/gradio/cli/commands/components/publish.py
--- a/gradio/cli/commands/components/publish.py
+++ b/gradio/cli/commands/components/publish.py
@@ -1,11 +1,9 @@
-from __future__ import annotations
-
import random
import re
import shutil
import tempfile
from pathlib import Path
-from typing import Optional
+from typing import List, Optional
import semantic_version
from huggingface_hub import HfApi
@@ -65,13 +63,13 @@ def _ignore(s, names):
return ignored
-def _get_version_from_file(dist_file: Path) -> str | None:
+def _get_version_from_file(dist_file: Path) -> Optional[str]:
match = re.search(r"-(\d+\.\d+\.\d+[a-zA-Z]*\d*)-", dist_file.name)
if match:
return match.group(1)
-def _get_max_version(distribution_files: list[Path]) -> str | None:
+def _get_max_version(distribution_files: List[Path]) -> Optional[str]:
versions = []
for p in distribution_files:
version = _get_version_from_file(p)
| diff --git a/js/app/test/audio_component_events.spec.ts b/js/app/test/audio_component_events.spec.ts
--- a/js/app/test/audio_component_events.spec.ts
+++ b/js/app/test/audio_component_events.spec.ts
@@ -7,10 +7,7 @@ test("Audio click-to-upload uploads audio successfuly. File downloading works an
.getByRole("button", { name: "Drop Audio Here - or - Click to Upload" })
.click();
const uploader = await page.locator("input[type=file]");
- await Promise.all([
- uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
- page.waitForResponse("**/upload?*")
- ]);
+ await uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]);
await expect(page.getByLabel("# Input Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Input Upload Events")).toHaveValue("1");
@@ -21,10 +18,7 @@ test("Audio click-to-upload uploads audio successfuly. File downloading works an
.getByRole("button", { name: "Drop Audio Here - or - Click to Upload" })
.click();
- await Promise.all([
- uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]),
- page.waitForResponse("**/upload?*")
- ]);
+ await uploader.setInputFiles(["../../test/test_files/audio_sample.wav"]);
await expect(page.getByLabel("# Input Change Events")).toHaveValue("3");
await expect(page.getByLabel("# Input Upload Events")).toHaveValue("2");
@@ -38,16 +32,13 @@ test("Audio click-to-upload uploads audio successfuly. File downloading works an
test("Audio drag-and-drop uploads a file to the server correctly.", async ({
page
}) => {
- await Promise.all([
- drag_and_drop_file(
- page,
- "input[type=file]",
- "../../test/test_files/audio_sample.wav",
- "audio_sample.wav",
- "audio/wav"
- ),
- page.waitForResponse("**/upload?*")
- ]);
+ await drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "../../test/test_files/audio_sample.wav",
+ "audio_sample.wav",
+ "audio/wav"
+ );
await expect(page.getByLabel("# Input Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Input Upload Events")).toHaveValue("1");
});
@@ -67,30 +58,24 @@ test("Audio drag-and-drop displays a warning when the file is of the wrong mime
test("Play, Pause, and stop events work correctly.", async ({ page }) => {
const uploader = await page.locator("input[type=file]");
- await Promise.all([
- uploader.setInputFiles(["../../demo/audio_debugger/cantina.wav"]),
- page.waitForResponse("**/upload?*")
- ]);
+ await uploader.setInputFiles(["../../demo/audio_debugger/cantina.wav"]);
+ const event_triggered = async (label: string) => {
+ const value = await page.getByLabel(label).inputValue();
+ expect(Number(value)).toBeGreaterThan(0);
+ };
await page
.getByTestId("waveform-Output Audio")
.getByLabel("Play", { exact: true })
.click();
+ await expect(async () => event_triggered("# Output Play Events")).toPass();
+
await page.getByTestId("waveform-Output Audio").getByLabel("Pause").click();
+ await expect(async () => event_triggered("# Output Pause Events")).toPass();
+
await page
.getByTestId("waveform-Output Audio")
.getByLabel("Play", { exact: true })
.click();
-
- const event_triggered = async (label: string) => {
- const value = await page.getByLabel(label).inputValue();
- expect(Number(value)).toBeGreaterThan(0);
- };
-
- // toPass will retry the function until it passes or times out
- // need this because the stop event is only triggered when the video is done playing
- // hard to time otherwise
- await expect(async () => event_triggered("# Output Play Events")).toPass();
- await expect(async () => event_triggered("# Output Pause Events")).toPass();
await expect(async () => event_triggered("# Output Stop Events")).toPass();
});
diff --git a/js/app/test/chatinterface_streaming_echo.spec.ts b/js/app/test/chatinterface_streaming_echo.spec.ts
--- a/js/app/test/chatinterface_streaming_echo.spec.ts
+++ b/js/app/test/chatinterface_streaming_echo.spec.ts
@@ -9,51 +9,30 @@ test("chatinterface works with streaming functions and all buttons behave as exp
const clear_button = await page.getByRole("button", { name: "🗑️ Clear" });
const textbox = await page.getByPlaceholder("Type a message...");
- let iterations: Promise<any>[] = [];
- page.on("websocket", (ws) => {
- iterations.push(
- ws.waitForEvent("framereceived", {
- predicate: (event) => {
- return (
- JSON.parse(event.payload as string).msg === "process_completed"
- );
- }
- })
- );
- });
-
await textbox.fill("hello");
await submit_button.click();
- await iterations[0];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator(".chatbot p").count()).toBe(1);
const bot_message_0 = await page.locator(".bot.message").nth(0);
await expect(bot_message_0).toContainText("You typed: hello");
await textbox.fill("hi");
await submit_button.click();
- await iterations[1];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator(".message.bot").count()).toBe(2);
const bot_message_1 = await page.locator(".bot").nth(1);
await expect(bot_message_1).toContainText("You typed: hi");
await retry_button.click();
- await iterations[2];
await expect(textbox).toHaveValue("");
- await expect(bot_message_1).toContainText("You typed: hi");
+ await expect(page.locator(".bot").nth(1)).toContainText("You typed: hi");
await undo_button.click();
- await iterations[3];
await expect.poll(async () => page.locator(".message.bot").count()).toBe(1);
await expect(textbox).toHaveValue("hi");
await textbox.fill("salaam");
await submit_button.click();
- await iterations[4];
await expect(textbox).toHaveValue("");
- await expect.poll(async () => page.locator(".bot.message").count()).toBe(2);
- await expect(bot_message_1).toContainText("You typed: salaam");
+ await expect(page.locator(".bot").nth(1)).toContainText("You typed: salaam");
await clear_button.click();
await expect.poll(async () => page.locator(".bot.message").count()).toBe(0);
diff --git a/js/app/test/image_component_events.spec.ts b/js/app/test/image_component_events.spec.ts
--- a/js/app/test/image_component_events.spec.ts
+++ b/js/app/test/image_component_events.spec.ts
@@ -6,37 +6,34 @@ test("Image click-to-upload uploads image successfuly. Clear button dispatches e
}) => {
await page.getByRole("button", { name: "Drop Image Here" }).click();
const uploader = await page.locator("input[type=file]");
- await Promise.all([
- uploader.setInputFiles(["./test/files/cheetah1.jpg"]),
- page.waitForResponse("**/upload?*?*")
- ]);
+ const change_counter = await page.getByLabel("# Change Events", {
+ exact: true
+ });
+ const clear_counter = await page.getByLabel("# Clear Events");
+ const upload_counter = await page.getByLabel("# Upload Events");
+ const change_output_counter = await page.getByLabel("# Change Events Output");
- await expect(page.getByLabel("# Change Events").first()).toHaveValue("1");
- await expect(await page.getByLabel("# Upload Events")).toHaveValue("1");
- await expect(await page.getByLabel("# Change Events Output")).toHaveValue(
- "1"
- );
+ await uploader.setInputFiles("./test/files/cheetah1.jpg");
+
+ await expect(change_counter).toHaveValue("1");
+ await expect(upload_counter).toHaveValue("1");
+ await expect(change_output_counter).toHaveValue("1");
const downloadPromise = page.waitForEvent("download");
await page.getByLabel("Download").click();
const download = await downloadPromise;
- // Automatically convert to png in the backend since PIL is very picky
+ // PIL converts from .jpg to .jpeg
await expect(download.suggestedFilename()).toBe("cheetah1.jpeg");
await page.getByLabel("Remove Image").click();
- await expect(page.getByLabel("# Clear Events")).toHaveValue("1");
- await expect(page.getByLabel("# Change Events").first()).toHaveValue("2");
+ await expect(clear_counter).toHaveValue("1");
+ await expect(change_counter).toHaveValue("2");
+ await expect(upload_counter).toHaveValue("1");
- await Promise.all([
- uploader.setInputFiles(["./test/files/gradio-logo.svg"]),
- page.waitForResponse("**/upload?*?*")
- ]);
-
- await expect(page.getByLabel("# Change Events").first()).toHaveValue("3");
- await expect(await page.getByLabel("# Upload Events")).toHaveValue("2");
- await expect(await page.getByLabel("# Change Events Output")).toHaveValue(
- "2"
- );
+ await uploader.setInputFiles("./test/files/gradio-logo.svg");
+ await expect(change_counter).toHaveValue("3");
+ await expect(upload_counter).toHaveValue("2");
+ await expect(change_output_counter).toHaveValue("2");
const SVGdownloadPromise = page.waitForEvent("download");
await page.getByLabel("Download").click();
@@ -45,16 +42,13 @@ test("Image click-to-upload uploads image successfuly. Clear button dispatches e
});
test("Image drag-to-upload uploads image successfuly.", async ({ page }) => {
- await Promise.all([
- drag_and_drop_file(
- page,
- "input[type=file]",
- "./test/files/cheetah1.jpg",
- "cheetag1.jpg",
- "image/*"
- ),
- page.waitForResponse("**/upload?*")
- ]);
+ await drag_and_drop_file(
+ page,
+ "input[type=file]",
+ "./test/files/cheetah1.jpg",
+ "cheetag1.jpg",
+ "image/*"
+ );
await expect(page.getByLabel("# Change Events").first()).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
});
@@ -71,9 +65,7 @@ test("Image copy from clipboard dispatches upload event.", async ({ page }) => {
navigator.clipboard.write([new ClipboardItem({ [blob.type]: blob })]);
});
- await page.pause();
await page.getByLabel("clipboard-image-toolbar-btn").click();
- await page.pause();
await expect(page.getByLabel("# Change Events").first()).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
});
diff --git a/js/app/test/model3d_component_events.spec.ts b/js/app/test/model3d_component_events.spec.ts
--- a/js/app/test/model3d_component_events.spec.ts
+++ b/js/app/test/model3d_component_events.spec.ts
@@ -7,18 +7,19 @@ test("Model3D click-to-upload uploads file successfuly. Upload and clear events
.getByRole("button", { name: "Drop File Here - or - Click to Upload" })
.click();
const uploader = await page.locator("input[type=file]");
- await Promise.all([
- uploader.setInputFiles(["./test/files/face.obj"]),
- page.waitForResponse("**/upload?*?*")
- ]);
+ const change_counter = await page.getByLabel("# Change Events");
+ const upload_counter = await page.getByLabel("# Upload Events");
+ const clear_counter = await page.getByLabel("# Clear Events");
- await expect(page.getByLabel("# Change Events")).toHaveValue("1");
- await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+ await uploader.setInputFiles("./test/files/face.obj");
+
+ await expect(change_counter).toHaveValue("1");
+ await expect(upload_counter).toHaveValue("1");
await page.getByLabel("Clear").nth(0).click();
- await expect(page.getByLabel("# Change Events")).toHaveValue("2");
- await expect(page.getByLabel("# Clear Events")).toHaveValue("1");
- await expect(page.getByLabel("Clear Value")).toHaveValue("");
+ await expect(change_counter).toHaveValue("2");
+ await expect(clear_counter).toHaveValue("1");
+ await expect(await page.getByLabel("Clear Value")).toHaveValue("");
const downloadPromise = page.waitForEvent("download");
await page.getByLabel("Download").click();
@@ -35,7 +36,6 @@ test("Model3D drag-and-drop uploads a file to the server correctly.", async ({
"./test/files/face.obj",
"face.obj"
);
- await page.waitForResponse("**/upload?*");
- await expect(page.getByLabel("# Change Events")).toHaveValue("1");
- await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
+ await expect(await page.getByLabel("# Change Events")).toHaveValue("1");
+ await expect(await page.getByLabel("# Upload Events")).toHaveValue("1");
});
diff --git a/js/app/test/upload_button_component_events.spec.ts b/js/app/test/upload_button_component_events.spec.ts
--- a/js/app/test/upload_button_component_events.spec.ts
+++ b/js/app/test/upload_button_component_events.spec.ts
@@ -5,10 +5,7 @@ test("UploadButton properly dispatches load event and click event for the single
}) => {
await page.getByRole("button", { name: "Upload Single File" }).click();
const uploader = await page.getByTestId("Upload Single File-upload-button");
- await Promise.all([
- uploader.setInputFiles(["./test/files/face.obj"]),
- page.waitForResponse("**/upload?*?*")
- ]);
+ await uploader.setInputFiles(["./test/files/cheetah1.jpg"]);
await expect(page.getByLabel("# Load Upload Single File")).toHaveValue("1");
await expect(
@@ -18,7 +15,7 @@ test("UploadButton properly dispatches load event and click event for the single
const downloadPromise = page.waitForEvent("download");
await page.getByRole("link").nth(0).click();
const download = await downloadPromise;
- await expect(download.suggestedFilename()).toBe("face.obj");
+ await expect(download.suggestedFilename()).toBe("cheetah1.jpg");
});
test("UploadButton properly dispatches load event and click event for the multiple file case.", async ({
@@ -28,12 +25,9 @@ test("UploadButton properly dispatches load event and click event for the multip
const uploader = await page.getByTestId(
"Upload Multiple Files-upload-button"
);
- await Promise.all([
- uploader.setInputFiles([
- "./test/files/face.obj",
- "./test/files/cheetah1.jpg"
- ]),
- page.waitForResponse("**/upload?*?*")
+ await uploader.setInputFiles([
+ "./test/files/face.obj",
+ "./test/files/cheetah1.jpg"
]);
await expect(page.getByLabel("# Load Upload Multiple Files")).toHaveValue(
diff --git a/js/app/test/video_component_events.spec.ts b/js/app/test/video_component_events.spec.ts
--- a/js/app/test/video_component_events.spec.ts
+++ b/js/app/test/video_component_events.spec.ts
@@ -7,44 +7,44 @@ test("Video click-to-upload uploads video successfuly. Clear, play, and pause bu
.getByRole("button", { name: "Drop Video Here - or - Click to Upload" })
.click();
const uploader = await page.locator("input[type=file]");
- await Promise.all([
- uploader.setInputFiles(["./test/files/file_test.ogg"]),
- page.waitForResponse("**/upload?*?*")
- ]);
+ await uploader.setInputFiles(["./test/files/file_test.ogg"]);
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
- await page.getByLabel("play-pause-replay-button").nth(0).click();
- await page.getByLabel("play-pause-replay-button").nth(0).click();
- await expect(page.getByLabel("# Play Events")).toHaveValue("1");
- await expect(page.getByLabel("# Pause Events")).toHaveValue("1");
-
await page.getByLabel("Clear").click();
await expect(page.getByLabel("# Change Events")).toHaveValue("2");
await page
.getByRole("button", { name: "Drop Video Here - or - Click to Upload" })
.click();
- await Promise.all([
- uploader.setInputFiles(["./test/files/file_test.ogg"]),
- page.waitForResponse("**/upload?*")
- ]);
+ await uploader.setInputFiles(["./test/files/file_test.ogg"]);
await expect(page.getByLabel("# Change Events")).toHaveValue("3");
await expect(page.getByLabel("# Upload Events")).toHaveValue("2");
- await page.getByLabel("play-pause-replay-button").first().click();
- await page.getByLabel("play-pause-replay-button").first().click();
- await expect(page.getByLabel("# Play Events")).toHaveValue("2");
- await expect(page.getByLabel("# Pause Events")).toHaveValue("2");
-
const downloadPromise = page.waitForEvent("download");
await page.getByLabel("Download").click();
const download = await downloadPromise;
await expect(download.suggestedFilename()).toBe("file_test.ogg");
});
+test("Video play, pause events work correctly.", async ({ page }) => {
+ await page
+ .getByRole("button", { name: "Drop Video Here - or - Click to Upload" })
+ .click();
+ const uploader = await page.locator("input[type=file]");
+ await uploader.setInputFiles(["./test/files/file_test.ogg"]);
+
+ // Wait change event to trigger
+ await expect(page.getByLabel("# Change Events")).toHaveValue("1");
+
+ await page.getByLabel("play-pause-replay-button").first().click();
+ await expect(page.getByLabel("# Play Events")).toHaveValue("1");
+ await page.getByLabel("play-pause-replay-button").first().click();
+ await expect(page.getByLabel("# Pause Events")).toHaveValue("1");
+});
+
test("Video drag-and-drop uploads a file to the server correctly.", async ({
page
}) => {
@@ -55,7 +55,6 @@ test("Video drag-and-drop uploads a file to the server correctly.", async ({
"file_test.ogg",
"video/*"
);
- await page.waitForResponse("**/upload?*");
await expect(page.getByLabel("# Change Events")).toHaveValue("1");
await expect(page.getByLabel("# Upload Events")).toHaveValue("1");
});
diff --git a/test/requirements.txt b/test/requirements.txt
--- a/test/requirements.txt
+++ b/test/requirements.txt
@@ -141,6 +141,7 @@ pytest==7.1.2
# -r requirements.in
# pytest-asyncio
# pytest-cov
+pytest-virtualenv==1.7.0
pytest-asyncio==0.19.0
# via -r requirements.in
pytest-cov==3.0.0
diff --git a/test/test_gradio_component_cli.py b/test/test_gradio_component_cli.py
--- a/test/test_gradio_component_cli.py
+++ b/test/test_gradio_component_cli.py
@@ -1,12 +1,11 @@
+import shutil
import textwrap
from pathlib import Path
import pytest
from gradio.cli.commands.components._create_utils import OVERRIDES
-from gradio.cli.commands.components.build import _build
from gradio.cli.commands.components.create import _create
-from gradio.cli.commands.components.install_component import _install
from gradio.cli.commands.components.publish import _get_version_from_file
from gradio.cli.commands.components.show import _show
@@ -107,38 +106,44 @@ def test_show(capsys):
@pytest.mark.xfail
[email protected]("template", ["Audio", "Video", "Image", "Textbox"])
-def test_build(template, tmp_path):
- _create(
- "TestTextbox",
- template=template,
- directory=tmp_path,
- overwrite=True,
- install=True,
- configure_metadata=False,
- )
- _build(tmp_path, build_frontend=True)
- template_dir: Path = (
- tmp_path.resolve() / "backend" / "gradio_testtextbox" / "templates"
- )
- assert template_dir.exists() and template_dir.is_dir()
- assert list(template_dir.glob("**/index.js"))
- assert (tmp_path / "dist").exists() and list((tmp_path / "dist").glob("*.whl"))
-
-
-def test_install(tmp_path):
- _create(
- "TestTextbox",
- template="Textbox",
- directory=tmp_path,
- overwrite=True,
- install=False,
- configure_metadata=False,
- )
-
- assert not (tmp_path / "frontend" / "node_modules").exists()
- _install(tmp_path)
- assert (tmp_path / "frontend" / "node_modules").exists()
[email protected]("template", ["Image"])
+def test_build(template, virtualenv):
+ # Copy pnpm-lock.yaml to not cause unintended changes tracked by git
+ pnpm_lock = Path(__file__).parent / ".." / "pnpm-lock.yaml"
+ pnpm_copy = Path(__file__).parent / ".." / "pnpm-lock-copy.yaml"
+ shutil.copy(str(pnpm_lock), str(pnpm_copy))
+
+ # Using the js/preview/test directory will use the workspace code
+ dir_ = (
+ Path(__file__).parent / ".." / "js" / "preview" / "test" / "testtextbox"
+ ).resolve()
+ shutil.rmtree(str(dir_), ignore_errors=True)
+
+ try:
+ # Local installs of gradio and gradio-client
+ gradio_dir = Path(__file__).parent / ".."
+ client = Path(__file__).parent / ".." / "client" / "python"
+ virtualenv.run("pip install build")
+ virtualenv.run(f"pip install -e {str(gradio_dir)}")
+ virtualenv.run(f"pip install -e {str(client)}")
+
+ virtualenv.run(
+ f"{shutil.which('gradio')} cc create TestTextbox --template {template} --no-configure-metadata --directory {str(dir_)}",
+ )
+ assert (dir_ / "frontend" / "node_modules").exists()
+
+ # need to reinstall local client because installing the custom component
+ # will pull latest stable version from pypi
+ virtualenv.run(f"pip install -e {str(client)}")
+ virtualenv.run(f"{shutil.which('gradio')} cc build {str(dir_)}")
+
+ template_dir: Path = dir_ / "backend" / "gradio_testtextbox" / "templates"
+ assert template_dir.exists() and template_dir.is_dir()
+ assert list(template_dir.glob("**/index.js"))
+ assert (dir_ / "dist").exists() and list((dir_ / "dist").glob("*.whl"))
+ finally:
+ shutil.move(str(pnpm_copy), str(pnpm_lock))
+ shutil.rmtree(str(dir_), ignore_errors=True)
def test_fallback_template_app(tmp_path):
| Fix `test/test_gradio_component_cli.py:141` that breaks only on release PR
### Describe the bug
On every release PR, there's a backend test that fails because the JS packages have not been published yet.
E.g. https://github.com/gradio-app/gradio/actions/runs/7093296419/job/19306412188?pr=6575
<img width="1020" alt="image" src="https://github.com/gradio-app/gradio/assets/1778297/2d70e035-cf86-4726-b45d-134bbad1cb02">
It'd be great if we could fix this so that we can rely on this test when releasing.
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
https://github.com/gradio-app/gradio/actions/runs/7093296419/job/19306412188?pr=6575
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==4.7.1
```
### Severity
I can work around it
| 2023-12-13T22:56:35 |
|
gradio-app/gradio | 6,787 | gradio-app__gradio-6787 | [
"7006"
] | 0a42e96537ab11b4dc441c5c2780938be7faf464 | diff --git a/gradio/cli/commands/components/_create_utils.py b/gradio/cli/commands/components/_create_utils.py
--- a/gradio/cli/commands/components/_create_utils.py
+++ b/gradio/cli/commands/components/_create_utils.py
@@ -285,6 +285,10 @@ def ignore(_src, names):
source_package_json = _modify_js_deps(source_package_json, "dependencies", p)
source_package_json = _modify_js_deps(source_package_json, "devDependencies", p)
(frontend / "package.json").write_text(json.dumps(source_package_json, indent=2))
+ shutil.copy(
+ str(Path(__file__).parent / "files" / "gradio.config.js"),
+ frontend / "gradio.config.js",
+ )
def _replace_old_class_name(old_class_name: str, new_class_name: str, content: str):
| diff --git a/js/preview/test/test/frontend/Index.svelte b/js/preview/test/test/frontend/Index.svelte
--- a/js/preview/test/test/frontend/Index.svelte
+++ b/js/preview/test/test/frontend/Index.svelte
@@ -1,4 +1,5 @@
<script lang="ts">
+ import "./main.css";
import { JsonView } from "@zerodevx/svelte-json-view";
import type { Gradio } from "@gradio/utils";
@@ -25,12 +26,88 @@
}>;
</script>
-<Block {visible} {elem_id} {elem_classes} {container} {scale} {min_width}>
- <StatusTracker
- autoscroll={gradio.autoscroll}
- i18n={gradio.i18n}
- {...loading_status}
- />
-
- <JsonView json={value} />
-</Block>
+<div class="relative flex min-h-screen flex-col justify-center overflow-hidden">
+ <div
+ class="relative bg-white px-6 pt-10 pb-8 shadow-xl ring-1 ring-gray-900/5 sm:mx-auto sm:max-w-lg sm:rounded-lg sm:px-10"
+ >
+ <div class="mx-auto max-w-md">
+ <h1 class="text-xl! font-bold! text-gray-900">
+ <span class="text-blue-500">Tailwind</span> in Gradio
+ </h1>
+ <h2><em>(i hope you're happy now)</em></h2>
+ <div class="divide-y divide-gray-300/50">
+ <div class="space-y-6 py-8 text-base leading-7 text-gray-600">
+ <p>
+ An advanced online playground for Tailwind CSS, including support
+ for things like:
+ </p>
+ <ul class="space-y-4 my-4!">
+ <li class="flex items-center">
+ <svg
+ class="h-6 w-6 flex-none fill-sky-100 stroke-sky-500 stroke-2 mr-4"
+ stroke-linecap="round"
+ stroke-linejoin="round"
+ >
+ <circle cx="12" cy="12" r="11" />
+ <path
+ d="m8 13 2.165 2.165a1 1 0 0 0 1.521-.126L16 9"
+ fill="none"
+ />
+ </svg>
+ <p class="ml-4">
+ Customizing your
+ <code class="text-sm font-bold text-gray-900"
+ >tailwind.config.js</code
+ > file
+ </p>
+ </li>
+ <li class="flex items-center">
+ <svg
+ class="h-6 w-6 flex-none fill-sky-100 stroke-sky-500 stroke-2 mr-4"
+ stroke-linecap="round"
+ stroke-linejoin="round"
+ >
+ <circle cx="12" cy="12" r="11" />
+ <path
+ d="m8 13 2.165 2.165a1 1 0 0 0 1.521-.126L16 9"
+ fill="none"
+ />
+ </svg>
+ <p class="ml-4">
+ Extracting classes with
+ <code class="text-sm font-bold text-gray-900">@apply</code>
+ </p>
+ </li>
+ <li class="flex items-center">
+ <svg
+ class="h-6 w-6 flex-none fill-sky-100 stroke-sky-500 stroke-2 mr-4"
+ stroke-linecap="round"
+ stroke-linejoin="round"
+ >
+ <circle cx="12" cy="12" r="11" />
+ <path
+ d="m8 13 2.165 2.165a1 1 0 0 0 1.521-.126L16 9"
+ fill="none"
+ />
+ </svg>
+ <p class="ml-4">Code completion with instant preview</p>
+ </li>
+ </ul>
+ <p>
+ Perfect for learning how the framework works, prototyping a new
+ idea, or creating a demo to share online.
+ </p>
+ </div>
+ <div class="pt-8 text-base font-semibold leading-7">
+ <p class="text-gray-900">Want to dig deeper into Tailwind?</p>
+ <p>
+ <a
+ href="https://tailwindcss.com/docs"
+ class="text-sky-500 hover:text-sky-600">Read the docs →</a
+ >
+ </p>
+ </div>
+ </div>
+ </div>
+ </div>
+</div>
diff --git a/js/preview/test/test/frontend/gradio.config.js b/js/preview/test/test/frontend/gradio.config.js
new file mode 100644
--- /dev/null
+++ b/js/preview/test/test/frontend/gradio.config.js
@@ -0,0 +1,8 @@
+import tailwindcss from "@tailwindcss/vite";
+
+export default {
+ plugins: [tailwindcss()],
+ svelte: {
+ preprocess: require("svelte-preprocess")()
+ }
+};
diff --git a/js/preview/test/test/frontend/main.css b/js/preview/test/test/frontend/main.css
new file mode 100644
--- /dev/null
+++ b/js/preview/test/test/frontend/main.css
@@ -0,0 +1 @@
+@import "tailwindcss";
diff --git a/js/preview/test/test/frontend/package.json b/js/preview/test/test/frontend/package.json
--- a/js/preview/test/test/frontend/package.json
+++ b/js/preview/test/test/frontend/package.json
@@ -19,6 +19,8 @@
"@zerodevx/svelte-json-view": "^1.0.7"
},
"devDependencies": {
- "@gradio/preview": "workspace:^"
+ "@gradio/preview": "workspace:^",
+ "@tailwindcss/vite": "4.0.0-alpha.14",
+ "tailwindcss": "4.0.0-alpha.14"
}
}
| [Custom Components] Provide some configuration for building frontend
- [x] I have searched to see if a similar issue already exists.
**Is your feature request related to a problem? Please describe.**
Some JavaScript libraries need additional processing to be used correctly. Could Gradio provide some configuration to support this?
**Additional context**
I found this PR earlier: https://github.com/gradio-app/gradio/pull/6787. Will there be more updates?
| 2023-12-14T14:31:38 |
|
gradio-app/gradio | 7,151 | gradio-app__gradio-7151 | [
"7136"
] | 138761226a8070fb18d4f34c757b78d51e1c6101 | diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py
--- a/client/python/gradio_client/client.py
+++ b/client/python/gradio_client/client.py
@@ -1557,7 +1557,10 @@ def __next__(self) -> tuple | Any:
o = self.communicator.job.outputs[self._counter]
self._counter += 1
return o
- if self.communicator.job.latest_status.code == Status.FINISHED:
+ if (
+ self.communicator.job.latest_status.code == Status.FINISHED
+ and self._counter >= len(self.communicator.job.outputs)
+ ):
raise StopIteration()
time.sleep(0.001)
| Client iterates its output counter incorrectly, and a standard `for out in job` loop leaves leftover outputs unconsumed
### Describe the bug
```
with self.communicator.lock:
if len(self.communicator.job.outputs) == self._counter + 1:
o = self.communicator.job.outputs[self._counter]
self._counter += 1
return o
if self.communicator.job.latest_status.code == Status.FINISHED:
raise StopIteration()
```
The issue is that by the time the code reaches the first conditional, the number of `job.outputs` may already be 2 or more while `self._counter` is still 0.
So everything hangs until the end; I find this happens quite often.
Additionally, the job can reach the finished state while there are still outputs left to consume, so I have to add extra code after the normal `for out in job` loop to catch the leftovers, which makes the client annoying to code against.
In the end I have to hack together a bunch of external code to work around these issues.
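Roughly, that extra code ends up looking like the sketch below (illustrative only; the app URL and `api_name` are placeholders, not from the report):

```python
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")                # assumed local app URL
job = client.submit("some input", api_name="/predict")   # assumed endpoint

seen = 0
for out in job:   # the normal streaming loop
    print(out)
    seen += 1

# Extra pass: the iterator can stop on FINISHED while outputs produced after
# the last yield are still queued, so drain whatever was never yielded.
for out in job.outputs()[seen:]:
    print(out)
```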
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
A detailed repro is not required and the problem is self-evident from the code.
I can put together a PR if desired.
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio 4.15.0 or also older gradio too.
```
### Severity
Blocking usage of gradio
| e.g. this works:
```
from gradio_client.utils import Status
n = 0
t0 = time.time()
while True:
if not job.communicator:
break
time.sleep(0.001)
if len(job.outputs()) - 1 >= n:
audio_str = job.outputs()[n]
print("n=%s/%s dt=%s" % (n, len(job.outputs()) - 1, (time.time() - t0)))
t0 = time.time()
n += 1
if do_play:
play_audio_str(audio_str)
n_outputs = len(job.outputs()) # must be outside lock below
with job.communicator.lock:
if job.communicator.job.latest_status.code == Status.FINISHED and n >= n_outputs:
break
``` | 2024-01-25T04:28:57 |
|
gradio-app/gradio | 7,229 | gradio-app__gradio-7229 | [
"7213"
] | 60078df07f38c0ef90ef2ba90f8e3272b92c9e3d | diff --git a/gradio/cli/commands/components/publish.py b/gradio/cli/commands/components/publish.py
--- a/gradio/cli/commands/components/publish.py
+++ b/gradio/cli/commands/components/publish.py
@@ -141,7 +141,7 @@ def _publish(
]
wheel_file = max(
(p for p in distribution_files if p.suffix == ".whl"),
- key=lambda s: semantic_version.Version(str(s).split("-")[1]),
+ key=lambda s: semantic_version.Version(str(s.name).split("-")[1]),
)
if not wheel_file:
raise ValueError(
| Custom component feedback
When I was building my custom component I ran into a number of issues. Dumping here as feedback while we figure out what action to take (if any). The component I was working on was bootstrapped back in prerelease so some of these issues may not be present any more. I will be building a new component this week and will check.
- tooling / configuration
  - if the `project.readme` field isn't set correctly, you get a confusing error about the long description.
- I didn't have `gradio` in my dependencies field.
  - `gradio cc publish` failed catastrophically in CI when trying to find the latest version of the wheel, meaning I had to publish manually. (Will put together a reproduction and file an issue; see the sketch after this list.)
- runtime
  - some small CSS bugs. We should all build a few components to make sure the various internal components work correctly when used in different contexts.
- the static / interactive flipping causes many issues. Created a separate issue for this.
  - the custom status tracker was trickier than I expected, though that might just have been the above issue complicating things. The props are definitely a bit weird; I think we could clean this up.
  - I still have no idea what all of our CSS variables are; it is really difficult to figure out what to use.
- I don't know what all of our components are (especially atoms) and it isn't obvious how to use them.
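On the publish failure specifically: presumably the wheel lookup broke because it parsed the version out of the full distribution path rather than the file name, which falls apart as soon as the path contains a hyphen; this is what the `publish.py` change above addresses. A sketch (the path and package name are made up for illustration):

```python
from pathlib import Path
import semantic_version

# Hypothetical CI path that happens to contain a hyphen.
wheel = Path("/home/runner/my-component/dist/gradio_mycomp-0.1.0-py3-none-any.whl")

str(wheel).split("-")[1]        # -> "component/dist/gradio_mycomp", not a version,
                                #    so semantic_version.Version(...) raises
str(wheel.name).split("-")[1]   # -> "0.1.0"
semantic_version.Version("0.1.0")  # parses fine
```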
Other than the two obvious bugs (which have issues filed), I think some of these have been resolved in recent versions or are simply a case of improving the docs.
All that said, it is still cool how self-contained the whole workflow is, and other than a few specific issues things worked really nicely. With a little more refinement and documentation this will be really slick.
| Nice issue @pngwn. The issues around the tooling are already fixed in 4.15, except the issue you saw with publish. Agreed that we can make the documentation and discoverability of CSS and our components much better. | 2024-01-31T01:13:54 |
|
gradio-app/gradio | 7,354 | gradio-app__gradio-7354 | [
"7349"
] | d56bb28df80d8db1f33e4acf4f6b2c4f87cb8b28 | diff --git a/demo/dataframe_tab/run.py b/demo/dataframe_tab/run.py
new file mode 100644
--- /dev/null
+++ b/demo/dataframe_tab/run.py
@@ -0,0 +1,12 @@
+import gradio as gr
+
+with gr.Blocks() as demo:
+ with gr.Tab():
+ gr.HTML("<p>hi</p>")
+ with gr.Tab():
+ gr.Dataframe(
+ value=[[i + 1] for i in range(10)],
+ )
+
+if __name__ == "__main__":
+ demo.launch()
| Dataframe appears empty on mobile
### Describe the bug
Dataframe appears empty on mobile
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
https://chat.lmsys.org/?leaderboard
(working on a better repro)
### Screenshot

### Logs
_No response_
### System Info
```shell
gradio==4.17.0
```
### Severity
Blocking usage of gradio
| Okay, I have managed to create a minimal repro for this here: https://pngwn-7349-df-repro.hf.space/
It is only reproducible on specific devices. Here is a browserstack test with iPhone 15 / Safari 17.2 / Chrome.
https://github.com/gradio-app/gradio/assets/12937446/b17a195c-1bf2-4d76-98d7-6121a7479c1a
This bug occurs when the Dataframe is in a hidden tab. Tabs only set the display properties of the contents they contain; they don't remove anything from the page. I believe that the dataframe is trying to measure things when it is mounted to the DOM, which in this case is when it is invisible. I don't think it _re_measures itself when the visibility changes.
I do not know why iOS has different behaviour in chrome/ safari because they have the same rendering engine. Weird but it is what it is.
Things that don't affect it (as far as I can tell):
- The size of the dataframe, the size of the contents, the size of the window (and any strange combination of those, such as the DF being larger than the window, the DF contents being larger than the DF, etc.)
- The data types. I wondered if markdown was causing this, but it isn't; it happens regardless.
- The amount of content (large datasets). I thought this issue was due to leaderboard datasets being generally quite large (many columns/rows), but the main lmsys leaderboard isn't even that big and, as you can see in the repro, it doesn't even matter.
- Slow devices. I have tested with very large datasets with CPU and network throttling and the behaviour is the same. I can't repro it locally.
In short, this happens very consistently as long as the Dataframe is first rendered in a hidden tab.
I have not yet worked out how to fix it but I'm working on that now. | 2024-02-08T15:42:54 |
|
gradio-app/gradio | 7,410 | gradio-app__gradio-7410 | [
"7175"
] | 065c5b163c4badb9d9cbd06d627fb4ba086003e7 | diff --git a/demo/chatinterface_streaming_echo/run.py b/demo/chatinterface_streaming_echo/run.py
--- a/demo/chatinterface_streaming_echo/run.py
+++ b/demo/chatinterface_streaming_echo/run.py
@@ -1,10 +1,12 @@
import time
import gradio as gr
+
def slow_echo(message, history):
for i in range(len(message)):
time.sleep(0.05)
- yield "You typed: " + message[: i+1]
+ yield "You typed: " + message[: i + 1]
+
demo = gr.ChatInterface(slow_echo).queue()
| diff --git a/demo/test_chatinterface_streaming_echo/run.ipynb b/demo/test_chatinterface_streaming_echo/run.ipynb
new file mode 100644
--- /dev/null
+++ b/demo/test_chatinterface_streaming_echo/run.ipynb
@@ -0,0 +1 @@
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: test_chatinterface_streaming_echo"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "runs = 0\n", "\n", "\n", "def slow_echo(message, history):\n", " global runs # i didn't want to add state or anything to this demo\n", " runs = runs + 1\n", " for i in range(len(message)):\n", " yield f\"Run {runs} - You typed: \" + message[: i + 1]\n", "\n", "\n", "demo = gr.ChatInterface(slow_echo).queue()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
\ No newline at end of file
diff --git a/demo/test_chatinterface_streaming_echo/run.py b/demo/test_chatinterface_streaming_echo/run.py
new file mode 100644
--- /dev/null
+++ b/demo/test_chatinterface_streaming_echo/run.py
@@ -0,0 +1,16 @@
+import gradio as gr
+
+runs = 0
+
+
+def slow_echo(message, history):
+ global runs # i didn't want to add state or anything to this demo
+ runs = runs + 1
+ for i in range(len(message)):
+ yield f"Run {runs} - You typed: " + message[: i + 1]
+
+
+demo = gr.ChatInterface(slow_echo).queue()
+
+if __name__ == "__main__":
+ demo.launch()
diff --git a/js/app/test/chatinterface_streaming_echo.spec.ts b/js/app/test/chatinterface_streaming_echo.spec.ts
deleted file mode 100644
--- a/js/app/test/chatinterface_streaming_echo.spec.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-import { test, expect } from "@gradio/tootils";
-
-test("chatinterface works with streaming functions and all buttons behave as expected", async ({
- page
-}) => {
- const submit_button = await page.getByRole("button", { name: "Submit" });
- const retry_button = await page.getByRole("button", { name: "🔄 Retry" });
- const undo_button = await page.getByRole("button", { name: "↩️ Undo" });
- const clear_button = await page.getByRole("button", { name: "🗑️ Clear" });
- const textbox = await page.getByPlaceholder("Type a message...");
-
- await textbox.fill("hello");
- await submit_button.click();
- await expect(textbox).toHaveValue("");
- const bot_message_0 = await page.locator(".bot.message").nth(0);
- await expect(bot_message_0).toContainText("You typed: hello");
-
- await textbox.fill("hi");
- await submit_button.click();
- await expect(textbox).toHaveValue("");
- const bot_message_1 = await page.locator(".bot").nth(1);
- await expect(bot_message_1).toContainText("You typed: hi");
-
- await retry_button.click();
- await expect(textbox).toHaveValue("");
- await expect(page.locator(".bot").nth(1)).toContainText("You typed: hi");
-
- await undo_button.click();
- await expect
- .poll(async () => page.locator(".message.bot").count(), { timeout: 5000 })
- .toBe(1);
- await expect(textbox).toHaveValue("hi");
-
- await textbox.fill("salaam");
- await submit_button.click();
- await expect(textbox).toHaveValue("");
- await expect(page.locator(".bot").nth(1)).toContainText("You typed: salaam");
-
- await clear_button.click();
- await expect
- .poll(async () => page.locator(".bot.message").count(), { timeout: 5000 })
- .toBe(0);
-});
diff --git a/js/app/test/test_chatinterface_streaming_echo.spec.ts b/js/app/test/test_chatinterface_streaming_echo.spec.ts
new file mode 100644
--- /dev/null
+++ b/js/app/test/test_chatinterface_streaming_echo.spec.ts
@@ -0,0 +1,67 @@
+import { test, expect } from "@gradio/tootils";
+
+test("chatinterface works with streaming functions and all buttons behave as expected", async ({
+ page
+}) => {
+ const submit_button = page.getByRole("button", { name: "Submit" });
+ const retry_button = page.getByRole("button", { name: "🔄 Retry" });
+ const undo_button = page.getByRole("button", { name: "↩️ Undo" });
+ const clear_button = page.getByRole("button", { name: "🗑️ Clear" });
+ const textbox = page.getByPlaceholder("Type a message...");
+
+ await textbox.fill("hello");
+ await submit_button.click();
+
+ await expect(textbox).toHaveValue("");
+ const expected_text_el_0 = page.locator(".bot p", {
+ hasText: "Run 1 - You typed: hello"
+ });
+ await expect(expected_text_el_0).toBeVisible();
+ await expect
+ .poll(async () => page.locator(".bot.message").count(), { timeout: 2000 })
+ .toBe(1);
+
+ await textbox.fill("hi");
+ await submit_button.click();
+ await expect(textbox).toHaveValue("");
+ const expected_text_el_1 = page.locator(".bot p", {
+ hasText: "Run 2 - You typed: hi"
+ });
+ await expect(expected_text_el_1).toBeVisible();
+ await expect
+ .poll(async () => page.locator(".bot.message").count(), { timeout: 2000 })
+ .toBe(2);
+
+ await undo_button.click();
+ await expect
+ .poll(async () => page.locator(".message.bot").count(), { timeout: 5000 })
+ .toBe(1);
+ await expect(textbox).toHaveValue("hi");
+
+ await retry_button.click();
+ const expected_text_el_2 = page.locator(".bot p", {
+ hasText: "Run 3 - You typed: hello"
+ });
+ expect(textbox).toHaveValue("");
+ await expect(expected_text_el_2).toBeVisible();
+
+ await expect
+ .poll(async () => page.locator(".message.bot").count(), { timeout: 5000 })
+ .toBe(1);
+
+ await textbox.fill("hi");
+ await submit_button.click();
+ await expect(textbox).toHaveValue("");
+ const expected_text_el_3 = page.locator(".bot p", {
+ hasText: "Run 4 - You typed: hi"
+ });
+ await expect(expected_text_el_3).toBeVisible();
+ await expect
+ .poll(async () => page.locator(".bot.message").count(), { timeout: 2000 })
+ .toBe(2);
+
+ await clear_button.click();
+ await expect
+ .poll(async () => page.locator(".bot.message").count(), { timeout: 5000 })
+ .toBe(0);
+});
| Need to rework the 'static while pending' behaviour.
### Describe the bug
#6157 and #6160 implemented some behaviour where components are set to static when they are waiting for input (see #4733); this is very problematic (especially for inputs that are also outputs):
This works well for form inputs, but for many components, setting them to static destroys the core component and reinitialises a new one. This causes specific issues:
- Data loss: any state held only in that component (specific props, other internal state) is completely lost and cannot be recreated.
- Performance: Some components are very expensive to initialise, and a lot of work has gone into making sure the minimum work happens when the data is updated.
- StatusTrackers: Any components implementing different status trackers for static and interactive mode will not work correctly. This is especially confusing for custom component authors unfamiliar with the internals. The static StatusTracker is the only one that will be displayed because the component will always be in 'static' mode.
I'm not sure this needs to be addressed at an architectural level. Components are best placed to know how they should 'disable' themselves when events are pending (just as they are best placed to decide what their loading states should look like), and that information is already passed down via the `loading_status` prop. For form elements, as mentioned in the issue, they should set their status to `disabled`, maybe with an indicator, to resolve the issue. For components with a full-screen status tracker, it isn't an issue. Custom components can choose their own route.
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```python
import gradio as gr
```
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
-
```
### Severity
I can work around it
| 2024-02-13T18:21:12 |
|
gradio-app/gradio | 7,417 | gradio-app__gradio-7417 | [
"6627"
] | c2dfc592a4988efd5a96a062eec3fb4906f71748 | diff --git a/gradio/components/file_explorer.py b/gradio/components/file_explorer.py
--- a/gradio/components/file_explorer.py
+++ b/gradio/components/file_explorer.py
@@ -66,7 +66,7 @@ def __init__(
scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
height: The maximum height of the file component, specified in pixels if a number is passed, or in CSS units if a string is passed. If more files are uploaded than can fit in the height, a scrollbar will appear.
- interactive: if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.
+ interactive: if True, will allow users to select file(s); if False, will only display files. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
| The `interactive` parameter in `gr.FileExplorer` has no effect. It should either be correctly documented or the feature should be added
- [X] I have searched to see if a similar issue already exists.
### Is your feature request related to a problem?
interactive - if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.
This functionality apparently does nothing. I used it like this:
```python
gr.FileExplorer(root='./voices', height=400, interactive=True, label="voices available", file_count="multiple", glob="**/*.*", ignore_glob="*.db,")
```
Either it should be documented correctly, or the ability to upload files should be added:
This functionality could be like the Google Colab file explorer, enabling users to handle their files without needing direct access to the server.
It could also allow showing empty folders inside the root folder I defined.
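As an aside on current behaviour: per the docstring fix above, `interactive=True` governs whether users can select files in the tree, not upload them. A minimal, hypothetical sketch of reading that selection (the callback and output component below are illustrative, not from the report):

```python
import gradio as gr

def show_selection(paths):
    # paths is a list of selected file paths when file_count="multiple"
    return "\n".join(paths or [])

with gr.Blocks() as demo:
    explorer = gr.FileExplorer(
        root="./voices", file_count="multiple", interactive=True, glob="**/*.*"
    )
    selected = gr.Textbox(label="Selected files")
    explorer.change(show_selection, explorer, selected)

demo.launch()
```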

| I'm torn on this: making it part of the component would be a much slicker UI/UX for users, but we also need sensible defaults so users don't accidentally give everyone access to the machine. This can technically be accomplished without adding these features to the FileExplorer, but the UI/UX won't be as nice.
@pngwn I believe the `root` param can handle access to files. The way it is today, the only risk is the developer making a mistake and giving access to the wrong path. Adding a button to upload files only into the root folder defined by the param, plus an option to limit extension types, could protect against mistakes.
| 2024-02-14T16:53:53 |
|
gradio-app/gradio | 7,564 | gradio-app__gradio-7564 | [
"7633"
] | aba44707af20b78a7b87f5f7bd3f189d971afc71 | diff --git a/demo/audio_debugger/run.py b/demo/audio_debugger/run.py
--- a/demo/audio_debugger/run.py
+++ b/demo/audio_debugger/run.py
@@ -9,18 +9,32 @@
with gr.Tab("Audio"):
gr.Audio(audio_file)
with gr.Tab("Interface"):
- gr.Interface(lambda x:x, "audio", "audio", examples=[audio_file], cache_examples=True)
+ gr.Interface(
+ lambda x: x, "audio", "audio", examples=[audio_file], cache_examples=True
+ )
with gr.Tab("Streaming"):
- gr.Interface(lambda x:x, gr.Audio(streaming=True), "audio", examples=[audio_file], cache_examples=True)
+ gr.Interface(
+ lambda x: x,
+ gr.Audio(streaming=True),
+ "audio",
+ examples=[audio_file],
+ cache_examples=True,
+ )
with gr.Tab("console"):
ip = gr.Textbox(label="User IP Address")
- gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), "text", "text")
-
+ gr.Interface(
+ lambda cmd: subprocess.run([cmd], capture_output=True, shell=True)
+ .stdout.decode("utf-8")
+ .strip(),
+ "text",
+ "text",
+ )
+
def get_ip(request: gr.Request):
return request.client.host
-
+
demo.load(get_ip, None, ip)
-
+
if __name__ == "__main__":
demo.queue()
demo.launch()
diff --git a/demo/hello_blocks/run.py b/demo/hello_blocks/run.py
--- a/demo/hello_blocks/run.py
+++ b/demo/hello_blocks/run.py
@@ -1,8 +1,10 @@
import gradio as gr
+
def greet(name):
return "Hello " + name + "!"
+
with gr.Blocks() as demo:
name = gr.Textbox(label="Name")
output = gr.Textbox(label="Output Box")
@@ -10,4 +12,4 @@ def greet(name):
greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
+ demo.launch()
| diff --git a/js/app/src/init.test.ts b/js/app/src/init.test.ts
new file mode 100644
--- /dev/null
+++ b/js/app/src/init.test.ts
@@ -0,0 +1,521 @@
+import { describe, test, expect, vi } from "vitest";
+import { spy } from "tinyspy";
+import { setupServer } from "msw/node";
+import { http, HttpResponse } from "msw";
+import type { client_return } from "@gradio/client";
+import { Dependency, TargetMap } from "./types";
+import {
+ process_frontend_fn,
+ create_target_meta,
+ determine_interactivity,
+ process_server_fn,
+ get_component
+} from "./init";
+
+describe("process_frontend_fn", () => {
+ test("empty source code returns null", () => {
+ const source = "";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ expect(fn).toBe(null);
+ });
+
+ test("falsey source code returns null: false", () => {
+ const source = false;
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ expect(fn).toBe(null);
+ });
+
+ test("falsey source code returns null: undefined", () => {
+ const source = undefined;
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ expect(fn).toBe(null);
+ });
+
+ test("falsey source code returns null: null", () => {
+ const source = null;
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ expect(fn).toBe(null);
+ });
+
+ test("source code returns a function", () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ expect(typeof fn).toBe("function");
+ });
+
+ test("arrays of values can be passed to the generated function", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual([1]);
+ }
+ });
+
+ test("arrays of many values can be passed", async () => {
+ const source = "(...args) => args";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ if (fn) {
+ await expect(fn([1, 2, 3, 4, 5, 6])).resolves.toEqual([1, 2, 3, 4, 5, 6]);
+ }
+ });
+
+ test("The generated function returns a promise", () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ if (fn) {
+ expect(fn([1])).toBeInstanceOf(Promise);
+ }
+ });
+
+ test("The generated function is callable and returns the expected value", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual([1]);
+ }
+ });
+
+ test("The return value of the function is wrapped in an array if there is no backend function and the input length is 1", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 1, 1);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual([1]);
+ }
+ });
+
+ test("The return value of the function is not wrapped in an array if there is no backend function and the input length is greater than 1", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, false, 2, 2);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual(1);
+ }
+ });
+
+ test("The return value of the function is wrapped in an array if there is a backend function and the input length is 1", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, true, 1, 1);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual([1]);
+ }
+ });
+
+ test("The return value of the function is not wrapped in an array if there is a backend function and the input length is greater than 1", async () => {
+ const source = "(arg) => arg";
+
+ const fn = process_frontend_fn(source, true, 2, 2);
+ if (fn) {
+ await expect(fn([1])).resolves.toEqual(1);
+ }
+ });
+});
+
+describe("create_target_meta", () => {
+ test("creates a target map", () => {
+ const targets: Dependency["targets"] = [
+ [1, "change"],
+ [2, "input"],
+ [3, "load"]
+ ];
+ const fn_index = 0;
+ const target_map = {};
+
+ const result = create_target_meta(targets, fn_index, target_map);
+ expect(result).toEqual({
+ 1: { change: [0] },
+ 2: { input: [0] },
+ 3: { load: [0] }
+ });
+ });
+
+ test("if the target already exists, it adds the new trigger to the list", () => {
+ const targets: Dependency["targets"] = [
+ [1, "change"],
+ [1, "input"],
+ [1, "load"]
+ ];
+ const fn_index = 1;
+ const target_map: TargetMap = {
+ 1: { change: [0] }
+ };
+
+ const result = create_target_meta(targets, fn_index, target_map);
+ expect(result).toEqual({
+ 1: { change: [0, 1], input: [1], load: [1] }
+ });
+ });
+
+ test("if the trigger already exists, it adds the new function to the list", () => {
+ const targets: Dependency["targets"] = [
+ [1, "change"],
+ [2, "change"],
+ [3, "change"]
+ ];
+ const fn_index = 1;
+ const target_map: TargetMap = {
+ 1: { change: [0] },
+ 2: { change: [0] },
+ 3: { change: [0] }
+ };
+
+ const result = create_target_meta(targets, fn_index, target_map);
+ expect(result).toEqual({
+ 1: { change: [0, 1] },
+ 2: { change: [0, 1] },
+ 3: { change: [0, 1] }
+ });
+ });
+
+ test("if the target and trigger already exist, it adds the new function to the list", () => {
+ const targets: Dependency["targets"] = [[1, "change"]];
+ const fn_index = 1;
+ const target_map: TargetMap = {
+ 1: { change: [0] }
+ };
+
+ const result = create_target_meta(targets, fn_index, target_map);
+ expect(result).toEqual({
+ 1: { change: [0, 1] }
+ });
+ });
+
+ test("if the target, trigger and function id already exist, it does not add duplicates", () => {
+ const targets: Dependency["targets"] = [[1, "change"]];
+ const fn_index = 0;
+ const target_map: TargetMap = {
+ 1: { change: [0] }
+ };
+
+ const result = create_target_meta(targets, fn_index, target_map);
+ expect(result).toEqual({
+ 1: { change: [0] }
+ });
+ });
+});
+
+describe("determine_interactivity", () => {
+ test("returns true if the prop is interactive = true", () => {
+ const result = determine_interactivity(
+ 0,
+ true,
+ "hi",
+ new Set([0]),
+ new Set([2])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns false if the prop is interactive = false", () => {
+ const result = determine_interactivity(
+ 0,
+ false,
+ "hi",
+ new Set([0]),
+ new Set([2])
+ );
+ expect(result).toBe(false);
+ });
+
+ test("returns true if the component is an input", () => {
+ const result = determine_interactivity(
+ 0,
+ undefined,
+ "hi",
+ new Set([0]),
+ new Set([2])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: empty string", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ "",
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: empty array", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ [],
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: boolean", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ false,
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: undefined", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ undefined,
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: null", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ null,
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns true if the component is not an input or output and the component has no default value: 0", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ 0,
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(true);
+ });
+
+ test("returns false if the component is not an input or output and the component has a default value", () => {
+ const result = determine_interactivity(
+ 2,
+ undefined,
+ "hello",
+ new Set([0]),
+ new Set([1])
+ );
+ expect(result).toBe(false);
+ });
+});
+
+describe("process_server_fn", () => {
+ test("returns an object", () => {
+ const result = process_server_fn(1, ["fn1", "fn2"], {} as any);
+ expect(result).toBeTypeOf("object");
+ });
+
+ test("returns an object with the correct keys", () => {
+ const result = process_server_fn(1, ["fn1", "fn2"], {} as any);
+ expect(Object.keys(result)).toEqual(["fn1", "fn2"]);
+ });
+
+ test("returns an object with the correct keys and values", () => {
+ const app = {
+ component_server: async (id: number, fn: string, args: any) => {
+ return args;
+ }
+ } as client_return;
+
+ const result = process_server_fn(1, ["fn1", "fn2"], app);
+ expect(Object.keys(result)).toEqual(["fn1", "fn2"]);
+
+ expect(result.fn1).toBeInstanceOf(Function);
+ expect(result.fn2).toBeInstanceOf(Function);
+ });
+
+ test("returned server functions should resolve to a promise", async () => {
+ const app = {
+ component_server: async (id: number, fn: string, args: any) => {
+ return args;
+ }
+ } as client_return;
+
+ const result = process_server_fn(1, ["fn1", "fn2"], app);
+ const response = result.fn1("hello");
+ expect(response).toBeInstanceOf(Promise);
+ });
+
+ test("the functions call the clients component_server function with the correct arguments ", async () => {
+ const mock = spy(async (id: number, fn: string, args: any) => {
+ return args;
+ });
+ const app = {
+ component_server: mock as any
+ } as client_return;
+
+ const result = process_server_fn(1, ["fn1", "fn2"], app as client_return);
+ const response = await result.fn1("hello");
+ expect(response).toBe("hello");
+ expect(mock.calls).toEqual([[1, "fn1", "hello"]]);
+ });
+
+ test("if there are no server functions, it returns an empty object", () => {
+ const result = process_server_fn(1, undefined, {} as any);
+ expect(result).toEqual({});
+ });
+});
+
+describe("get_component", () => {
+ test("returns an object", () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ expect(result.component).toBeTypeOf("object");
+ });
+
+ test("returns an object with the correct keys", () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ expect(Object.keys(result)).toEqual([
+ "component",
+ "name",
+ "example_components"
+ ]);
+ });
+
+ test("the component key is a promise", () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ expect(result.component).toBeInstanceOf(Promise);
+ });
+
+ test("the resolved component key is an object", async () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ const o = await result.component;
+
+ expect(o).toBeTypeOf("object");
+ });
+
+ test("getting the same component twice should return the same promise", () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ const result_two = get_component(
+ "test-component-one",
+ "class_id",
+ "root",
+ []
+ );
+
+ expect(result.component).toBe(result_two.component);
+ });
+
+ test("if example components are not provided, the example_components key is undefined", async () => {
+ const result = get_component("dataset", "class_id", "root", []);
+ expect(result.example_components).toBe(undefined);
+ });
+
+ test("if the type is not a dataset, the example_components key is undefined", async () => {
+ const result = get_component("test-component-one", "class_id", "root", []);
+ expect(result.example_components).toBe(undefined);
+ });
+
+ test("when the type is a dataset, returns an object with the correct keys and values and example components", () => {
+ const result = get_component(
+ "dataset",
+ "class_id",
+ "root",
+ [
+ {
+ type: "test-component-one",
+ component_class_id: "example_class_id",
+ id: 1,
+ props: {
+ value: "hi",
+ interactive: false
+ },
+ has_modes: false,
+ instance: {} as any,
+ component: {} as any
+ }
+ ],
+ ["test-component-one"]
+ );
+ expect(result.component).toBeTypeOf("object");
+ expect(result.example_components).toBeInstanceOf(Map);
+ });
+
+ test("when example components are returned, returns an object with the correct keys and values and example components", () => {
+ const result = get_component(
+ "dataset",
+ "class_id",
+ "root",
+ [
+ {
+ type: "test-component-one",
+ component_class_id: "example_class_id",
+ id: 1,
+ props: {
+ value: "hi",
+ interactive: false
+ },
+ has_modes: false,
+ instance: {} as any,
+ component: {} as any
+ }
+ ],
+ ["test-component-one"]
+ );
+ expect(result.example_components?.get("test-component-one")).toBeTypeOf(
+ "object"
+ );
+ expect(result.example_components?.get("test-component-one")).toBeInstanceOf(
+ Promise
+ );
+ });
+
+ test("if the component is not found then it should request the component from the server", async () => {
+ const api_url = "example.com";
+ const id = "test-random";
+ const variant = "component";
+ const handlers = [
+ http.get(`${api_url}/custom_component/${id}/${variant}/style.css`, () => {
+ return new HttpResponse('console.log("boo")', {
+ status: 200,
+ headers: {
+ "Content-Type": "text/css"
+ }
+ });
+ })
+ ];
+
+ // vi.mock calls are always hoisted out of the test function to the top of the file
+ // so we need to use vi.hoisted to hoist the mock function above the vi.mock call
+ const { mock } = vi.hoisted(() => {
+ return { mock: vi.fn() };
+ });
+
+ vi.mock(
+ `example.com/custom_component/test-random/component/index.js`,
+ async () => {
+ mock();
+ return {
+ default: {
+ default: "HELLO"
+ }
+ };
+ }
+ );
+
+ const server = setupServer(...handlers);
+ server.listen();
+
+ await get_component("test-random", id, api_url, []).component;
+
+ expect(mock).toHaveBeenCalled();
+
+ server.close();
+ });
+});
diff --git a/js/app/test/test_chatinterface_streaming_echo.spec.ts b/js/app/test/test_chatinterface_streaming_echo.spec.ts
--- a/js/app/test/test_chatinterface_streaming_echo.spec.ts
+++ b/js/app/test/test_chatinterface_streaming_echo.spec.ts
@@ -42,7 +42,6 @@ test("chatinterface works with streaming functions and all buttons behave as exp
const expected_text_el_2 = page.locator(".bot p", {
hasText: "Run 3 - You typed: hello"
});
- expect(textbox).toHaveValue("");
await expect(expected_text_el_2).toBeVisible();
await expect
| accordion launches as open even when open = False since gradio 4.20
### Describe the bug
Congrats on the work on Gradio 4.20!
A bug seems to have appeared since Gradio 4.20: an Accordion launches open even when open=False; the open=False parameter no longer seems to have any effect.
I didn't have the bug in gradio 4.17
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
accordion = gr.Accordion(
label="liste des fichiers",
visible=True,
open=False,
)
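The reproduction above omits the imports and the surrounding app; a minimal runnable sketch of the same setup (the Blocks context, the child Markdown and the launch call are additions for completeness):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Expected to start collapsed; on Gradio 4.20 it renders open instead.
    with gr.Accordion(label="liste des fichiers", visible=True, open=False):
        gr.Markdown("contents that should stay hidden until the accordion is opened")

demo.launch()
```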
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio 4.20
safari, chrome, iOS, mac : the bug is everywhere
```
### Severity
I can work around it
| 2024-02-28T22:01:36 |
|
gradio-app/gradio | 7,599 | gradio-app__gradio-7599 | [
"6336"
] | 82263ed36543140be9446694a157e8729c10bf2a | diff --git a/gradio/components/audio.py b/gradio/components/audio.py
--- a/gradio/components/audio.py
+++ b/gradio/components/audio.py
@@ -24,7 +24,8 @@ class WaveformOptions:
A dataclass for specifying options for the waveform display in the Audio component. An instance of this class can be passed into the `waveform_options` parameter of `gr.Audio`.
Parameters:
waveform_color: The color (as a hex string or valid CSS color) of the full waveform representing the amplitude of the audio. Defaults to a light gray color.
- waveform_progress_color: The color (as a hex string or valid CSS color) that the waveform fills with to as the audio plays. Defaults to an orange color.
+ waveform_progress_color: The color (as a hex string or valid CSS color) that the waveform fills with to as the audio plays. Defaults to the accent color.
+ trim_region_color: The color (as a hex string or valid CSS color) of the trim region. Defaults to the accent color.
show_recording_waveform: Whether to show the waveform when recording audio. Defaults to True.
show_controls: Whether to show the standard HTML audio player below the waveform when recording audio or playing recorded audio. Defaults to False.
skip_length: The percentage (between 0 and 100) of the audio to skip when clicking on the skip forward / skip backward buttons. Defaults to 5.
@@ -33,6 +34,7 @@ class WaveformOptions:
waveform_color: str | None = None
waveform_progress_color: str | None = None
+ trim_region_color: str | None = None
show_recording_waveform: bool = True
show_controls: bool = False
skip_length: int | float = 5
@@ -118,7 +120,7 @@ def __init__(
editable: If True, allows users to manipulate the audio file if the component is interactive. Defaults to True.
min_length: The minimum length of audio (in seconds) that the user can pass into the prediction function. If None, there is no minimum length.
max_length: The maximum length of audio (in seconds) that the user can pass into the prediction function. If None, there is no maximum length.
- waveform_options: A dictionary of options for the waveform display. Options include: waveform_color (str), waveform_progress_color (str), show_controls (bool), skip_length (int). Default is None, which uses the default values for these options.
+ waveform_options: A dictionary of options for the waveform display. Options include: waveform_color (str), waveform_progress_color (str), show_controls (bool), skip_length (int), trim_region_color (str). Default is None, which uses the default values for these options.
"""
valid_sources: list[Literal["upload", "microphone"]] = ["upload", "microphone"]
if sources is None:
| diff --git a/test/test_components.py b/test/test_components.py
--- a/test/test_components.py
+++ b/test/test_components.py
@@ -844,6 +844,7 @@ def test_component_functions(self, gradio_temp_dir):
"skip_length": 5,
"waveform_color": None,
"waveform_progress_color": None,
+ "trim_region_color": None,
},
"_selectable": False,
}
@@ -897,6 +898,7 @@ def test_component_functions(self, gradio_temp_dir):
"skip_length": 5,
"waveform_color": None,
"waveform_progress_color": None,
+ "trim_region_color": None,
},
"_selectable": False,
}
| Audio gets louder when trimmed
### Describe the bug
It seems the audio volume goes up every time you crop it as you can hear in the following video:
https://github.com/gradio-app/gradio/assets/25161192/fdcacb28-c5a6-47e6-bfe6-c516d2385c70
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```py
import gradio as gr
gr.Interface(fn=lambda x: x, inputs='audio', outputs='audio').queue().launch()
```
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==4.1.2
```
### Severity
I can work around it
| Ah, I thought this is a different issue, but actually, I think this is the same issue I opened before. I thought trimming corrupted the audio when I opened #6206 but it seems the real issue is that audio gets louder. I guess I'll close #6206 in favor of this. | 2024-03-04T17:35:58 |
gradio-app/gradio | 7,855 | gradio-app__gradio-7855 | [
"7853"
] | d9afb0ff3215a1bdb23810b6a862c4f7bcb5a353 | diff --git a/.config/lite-builder/src/lite_builder/__init__.py b/.config/lite-builder/src/lite_builder/__init__.py
new file mode 100644
diff --git a/.config/lite-builder/src/lite_builder/builder.py b/.config/lite-builder/src/lite_builder/builder.py
new file mode 100644
--- /dev/null
+++ b/.config/lite-builder/src/lite_builder/builder.py
@@ -0,0 +1,5 @@
+from hatchling.builders.wheel import WheelBuilder
+
+
+class LiteBuilder(WheelBuilder):
+ PLUGIN_NAME = 'lite'
\ No newline at end of file
diff --git a/.config/lite-builder/src/lite_builder/hooks.py b/.config/lite-builder/src/lite_builder/hooks.py
new file mode 100644
--- /dev/null
+++ b/.config/lite-builder/src/lite_builder/hooks.py
@@ -0,0 +1,6 @@
+from hatchling.plugin import hookimpl
+from .builder import LiteBuilder
+
+@hookimpl
+def hatch_register_builder():
+ return LiteBuilder
\ No newline at end of file
| Optimize the wheel file contents for Lite
The following files included in the wheel are not needed for Lite.
* `gradio/_frontend_code`
* `gradio/templates`
* `gradio/test_data`
* `gradio/node`
* `gradio/*.pyi`
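For illustration only (this is not hatchling's plugin API, just a sketch of the filtering a Lite-oriented builder would need to apply), a path filter over the exclusions listed above:

```python
from fnmatch import fnmatch

# Globs derived from the list above; paths are relative to the wheel root.
LITE_EXCLUDES = [
    "gradio/_frontend_code/*",
    "gradio/templates/*",
    "gradio/test_data/*",
    "gradio/node/*",
    "gradio/*.pyi",
]

def keep_for_lite(relative_path: str) -> bool:
    """Return True if the file should be kept in the Lite wheel."""
    return not any(fnmatch(relative_path, pattern) for pattern in LITE_EXCLUDES)

assert keep_for_lite("gradio/blocks.py")
assert not keep_for_lite("gradio/templates/frontend/index.html")
```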
| 2024-03-27T10:05:57 |
||
gradio-app/gradio | 7,886 | gradio-app__gradio-7886 | [
"7885"
] | 946487cf8e477cbf8d6fad4e772ff574a21782c3 | diff --git a/gradio/routes.py b/gradio/routes.py
--- a/gradio/routes.py
+++ b/gradio/routes.py
@@ -37,7 +37,7 @@
import httpx
import markupsafe
import orjson
-from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, Response, status
+from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, status
from fastapi.responses import (
FileResponse,
HTMLResponse,
@@ -331,7 +331,8 @@ def login(form_data: OAuth2PasswordRequestForm = Depends()):
else:
@app.get("/logout")
- def logout(response: Response, user: str = Depends(get_current_user)):
+ def logout(user: str = Depends(get_current_user)):
+ response = RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
response.delete_cookie(key=f"access-token-{app.cookie_id}", path="/")
response.delete_cookie(
key=f"access-token-unsecure-{app.cookie_id}", path="/"
@@ -340,7 +341,7 @@ def logout(response: Response, user: str = Depends(get_current_user)):
for token in list(app.tokens.keys()):
if app.tokens[token] == user:
del app.tokens[token]
- return RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
+ return response
###############
# Main Routes
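The pattern behind the fix above, shown as plain FastAPI rather than Gradio code (a sketch; the cookie name is illustrative): cookie deletions have to be applied to the response object that is actually returned, since changes made to an injected `Response` parameter are not carried over when a different response instance is returned.

```python
from fastapi import FastAPI
from fastapi.responses import RedirectResponse

app = FastAPI()

@app.get("/logout")
def logout():
    # Build the redirect first, then delete the cookie on that same object.
    response = RedirectResponse(url="/", status_code=302)
    response.delete_cookie(key="access-token-example", path="/")
    return response
```

The patch applies exactly this: it constructs the RedirectResponse before calling delete_cookie and returns that same object.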
| logout route not deleting cookies
### Describe the bug
When using the /logout route, the cookies are not deleted properly.
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
```python
import gradio as gr
def update_message(request: gr.Request):
return f"Welcome, {request.username}"
with gr.Blocks() as demo:
m = gr.Markdown()
logout_button = gr.Button("Logout", link="/logout")
demo.load(update_message, None, m)
demo.launch(auth=[("Pete", "Pete"), ("Dawood", "Dawood")])
```
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
gradio==4.21.0
gradio_client==0.12.0
```
### Severity
I can work around it
| 2024-03-29T13:01:59 |
||
gradio-app/gradio | 7,963 | gradio-app__gradio-7963 | [
"7895"
] | eae97c29ce8bb81670e1ee0533efa02afa38cc23 | diff --git a/demo/blocks_flipper/run.py b/demo/blocks_flipper/run.py
--- a/demo/blocks_flipper/run.py
+++ b/demo/blocks_flipper/run.py
@@ -24,8 +24,15 @@ def flip_image(x):
with gr.Accordion("Open for More!", open=False):
gr.Markdown("Look at me...")
- temp_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.1, interactive=True, label="Slide me")
- temp_slider.change(lambda x:x, [temp_slider])
+ temp_slider = gr.Slider(
+ minimum=0.0,
+ maximum=1.0,
+ value=0.1,
+ step=0.1,
+ interactive=True,
+ label="Slide me",
+ )
+ temp_slider.change(lambda x: x, [temp_slider])
text_button.click(flip_text, inputs=text_input, outputs=text_output)
image_button.click(flip_image, inputs=image_input, outputs=image_output)
| diff --git a/js/app/test/blocks_flipper.spec.ts b/js/app/test/blocks_flipper.spec.ts
--- a/js/app/test/blocks_flipper.spec.ts
+++ b/js/app/test/blocks_flipper.spec.ts
@@ -4,6 +4,11 @@ test("accordion stays open when interacting with the slider", async ({
page
}) => {
await page.getByRole("button", { name: "Open for More! ▼" }).click();
- await page.getByLabel("range slider for Slide me").fill("0.5");
+
+ await page.getByLabel("Textbox").nth(0).fill("123");
+
+ await page.getByRole("button", { name: "Flip" }).click();
+ await expect(page.getByLabel("Textbox").nth(1)).toHaveValue("321");
+
await expect(page.getByText("Look at me...")).toBeVisible();
});
| regression on 4.22.0 and following: .focus() doesn't work anymore inside an Accordion(open=False)
### Describe the bug
Thank you for the tremendous work on Gradio!
I use Accordion(open=False) to hide components (such as a Dropdown and some buttons) and show them only when the user needs them.
I then use a .focus event on the Dropdown.
It worked perfectly with Gradio 4.21.0, but since 4.22.0, 4.23.0 and 4.24.0 it doesn't work anymore: it is impossible to open the Dropdown when I listen to gr.Dropdown.focus() inside my Accordion (only when this Accordion is initially set to open=False), because the Accordion closes itself automatically.
This prevents me from using 4.22.0 and newer versions.
Another idea: it would be nice to have an open=True parameter for gr.Dropdown, as in Accordions.
Thanks a lot!
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
import gradio as gr
with gr.Blocks() as demo:
with gr.Accordion(open=False):
zouzou = gr.Dropdown(
choices=["zouzou", "zouzou2"],
type="index",
)
zouzou.focus(
fn=lambda x: print(x),
inputs=[zouzou],
outputs=[],
queue=False,
)
demo.launch()
### Screenshot
before I click on the Dropdown
<img width="1298" alt="image" src="https://github.com/gradio-app/gradio/assets/126974970/03cca8cc-0660-4a5d-bd58-9e8d8c659977">
and then the Accordion closes itself when I click on the Dropdown, which disappears
<img width="1260" alt="image" src="https://github.com/gradio-app/gradio/assets/126974970/f8e026d9-2925-4763-83cc-1c5f8b6edaec">
### Logs
```shell
No logs
```
### System Info
```shell
gradio==4.22.0 (or 4.23.0 or 4.24.0)
Mac Safari or Chrome
PC Chrome or Edge
iPhone Safari or Chrome
```
### Severity
I can work around it
| Also, since 1.24.0 clicking onto a subcontrol inside an open accordion closes it (Firefox but probably others too...)
I corrected repro code to see the error: open=False needs to be in the gr.Accordion() to see the bug, not in the Blocks() of course
| 2024-04-08T14:08:47 |
gradio-app/gradio | 8,066 | gradio-app__gradio-8066 | [
"6087"
] | 568eeb26a90182519f491df34b1bc75ac67a7313 | diff --git a/.config/copy_frontend.py b/.config/copy_frontend.py
--- a/.config/copy_frontend.py
+++ b/.config/copy_frontend.py
@@ -36,6 +36,7 @@ def ignore(s, names):
):
ignored.append(n)
return ignored
+
shutil.copytree(
str(entry),
str(pathlib.Path("gradio") / "_frontend_code" / entry.name),
diff --git a/gradio/cli/commands/components/build.py b/gradio/cli/commands/components/build.py
--- a/gradio/cli/commands/components/build.py
+++ b/gradio/cli/commands/components/build.py
@@ -20,7 +20,6 @@
from gradio.cli.commands.display import LivePanelDisplay
gradio_template_path = Path(gradio.__file__).parent / "templates" / "frontend"
-gradio_node_path = Path(gradio.__file__).parent / "node" / "dev" / "files" / "index.js"
def _build(
@@ -120,6 +119,20 @@ def _build(
"node must be installed in order to run build command."
)
+ gradio_node_path = subprocess.run(
+ [node, "-e", "console.log(require.resolve('@gradio/preview'))"],
+ cwd=Path(component_directory / "frontend"),
+ check=False,
+ capture_output=True,
+ )
+
+ if gradio_node_path.returncode != 0:
+ raise ValueError(
+ "Could not find `@gradio/preview`. Run `npm i -D @gradio/preview` in your frontend folder."
+ )
+
+ gradio_node_path = gradio_node_path.stdout.decode("utf-8").strip()
+
node_cmds = [
node,
gradio_node_path,
diff --git a/gradio/cli/commands/components/dev.py b/gradio/cli/commands/components/dev.py
--- a/gradio/cli/commands/components/dev.py
+++ b/gradio/cli/commands/components/dev.py
@@ -13,7 +13,6 @@
from gradio.cli.commands.components.install_component import _get_executable_path
gradio_template_path = Path(gradio.__file__).parent / "templates" / "frontend"
-gradio_node_path = Path(gradio.__file__).parent / "node" / "dev" / "files" / "index.js"
def _dev(
@@ -63,6 +62,20 @@ def _dev(
"gradio", gradio_path, cli_arg_name="--gradio-path"
)
+ gradio_node_path = subprocess.run(
+ [node, "-e", "console.log(require.resolve('@gradio/preview'))"],
+ cwd=Path(component_directory / "frontend"),
+ check=False,
+ capture_output=True,
+ )
+
+ if gradio_node_path.returncode != 0:
+ raise ValueError(
+ "Could not find `@gradio/preview`. Run `npm i -D @gradio/preview` in your frontend folder."
+ )
+
+ gradio_node_path = gradio_node_path.stdout.decode("utf-8").strip()
+
proc = subprocess.Popen(
[
node,
| diff --git a/js/preview/test/test/frontend/package.json b/js/preview/test/test/frontend/package.json
--- a/js/preview/test/test/frontend/package.json
+++ b/js/preview/test/test/frontend/package.json
@@ -17,5 +17,8 @@
"@gradio/statustracker": "0.4.12",
"@gradio/utils": "0.3.2",
"@zerodevx/svelte-json-view": "^1.0.7"
+ },
+ "devDependencies": {
+ "@gradio/preview": "workspace:^"
}
}
| Custom Component break on JS Import for some libraries
### Describe the bug
Importing the `smiles-drawer` library completely breaks the custom component dev server.
It's the import alone that breaks the dev server.
The dev server gets stuck on the loading screen with some errors in the JS dev console; the terminal log has no errors.

The package is correctly installed in `frontend/node_modules`
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
Here is a repo with the offending line
https://github.com/duerrsimon/molecule2d/blob/dc7e1a3ef52efe25de2580a6eba70aedfeeb026b/frontend/Molecule.svelte#L5
Here is a reproduction showing that in a non-Gradio Svelte environment this works normally:
https://codesandbox.io/s/relaxed-einstein-7l8cx8?file=/Molecule.svelte
### Screenshot
_No response_
### Logs
_No response_
### System Info
```shell
Package JSON: {
"name": "molecule2d",
"version": "0.4.0-beta.6",
"description": "Gradio UI packages",
"type": "module",
"author": "",
"license": "ISC",
"private": false,
"main_changeset": true,
"exports": {
".": "./Index.svelte",
"./example": "./Example.svelte",
"./package.json": "./package.json"
},
"dependencies": {
"@gradio/atoms": "0.2.0-beta.4",
"@gradio/icons": "0.2.0-beta.1",
"@gradio/statustracker": "0.3.0-beta.6",
"@gradio/utils": "0.2.0-beta.4",
"smiles-drawer": "^2.1.7"
}
}
Gradio Environment Information:
------------------------------
Operating System: Linux
gradio version: 3.45.0b13
gradio_client version: 0.7.0b0
------------------------------------------------
gradio dependencies in your environment:
aiofiles: 23.2.1
altair: 5.1.1
fastapi: 0.103.1
ffmpy: 0.3.1
gradio-client==0.7.0-beta.0 is not installed.
httpx: 0.25.0
huggingface-hub: 0.17.3
importlib-resources: 6.1.0
jinja2: 3.1.2
markupsafe: 2.1.3
matplotlib: 3.8.0
numpy: 1.26.0
orjson: 3.9.7
packaging: 23.1
pandas: 2.1.1
pillow: 10.0.1
pydantic: 2.4.1
pydub: 0.25.1
python-multipart: 0.0.6
pyyaml: 6.0.1
requests: 2.31.0
semantic-version: 2.10.0
tomlkit==0.12.0 is not installed.
typer: 0.9.0
typing-extensions: 4.8.0
uvicorn: 0.23.2
websockets: 11.0.3
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.
gradio_client dependencies in your environment:
fsspec: 2023.9.2
httpx: 0.25.0
huggingface-hub: 0.17.3
packaging: 23.1
requests: 2.31.0
typing-extensions: 4.8.0
websockets: 11.0.3
```
### Severity
Blocking usage of gradio
| @duerrsimon I can reproduce with the smiles package but not in general.
I created a custom component that installs/imports `date-picker-svelte` (https://www.npmjs.com/package/date-picker-svelte) and I can import it dynamically in dev mode.

See the repo here: https://github.com/freddyaboulton/gradio-datepicker
Gonna tag @pngwn in case he has thoughts about this!
Yeah. I should have clarified that I am also able to import some other npm packages, just not this one. Since it works in a vanilla environment, I thought I'd make you aware of it, because it completely breaks the component in a non-obvious manner instead of failing a bit more gracefully.
Yea we definitely want to take a look at what's happening specifically with the smiles-drawer! I just got worried that _all_ npm packages were causing dev mode to break so I needed to verify for myself 😅
Thanks for the repros and clarifications!
I think I know what is happening. I've not verified this, but it is probably because the `smiles-drawer` package uses CJS instead of ESM, as you can see here:
https://github.com/reymond-group/smilesDrawer/blob/master/app.js#L2-L8
Vite is capable of transforming these CJS modules to ESM, but we are probably missing some config. I'll take a quick look ASAP. | 2024-04-18T19:35:10 |
gradio-app/gradio | 8,133 | gradio-app__gradio-8133 | [
"6746"
] | 937c8583714216e926606b251bc9225271bdc5a7 | diff --git a/gradio/cli/commands/components/_create_utils.py b/gradio/cli/commands/components/_create_utils.py
--- a/gradio/cli/commands/components/_create_utils.py
+++ b/gradio/cli/commands/components/_create_utils.py
@@ -199,6 +199,11 @@ def __post_init__(self):
"""
),
),
+ "ImageEditor": ComponentFiles(
+ template="ImageEditor",
+ python_file_name="image_editor.py",
+ js_dir="imageeditor",
+ ),
}
| Cannot use ImageEditor as Custom Component Template
### Describe the bug
gradio cc create does not work because it cannot find the python file
gradio cc dev does not work (error below)
### Have you searched existing issues? 🔎
- [X] I have searched and found no existing issues
### Reproduction
gradio cc create MyImageEditor --template ImageEditor
gradio cc dev
### Screenshot

```
/Users/freddy/sources/components/myimageeditor/frontend/shared/tools/Crop.svelte:193:1 No scopable elements found in template. If you're using global styles in the style tag, you should move it into an external stylesheet file and import it in JS. See https://github.com/sveltejs/vite-plugin-svelte/blob/main/docs/faq.md#where-should-i-put-my-global-styles.
```
### Logs
_No response_
### System Info
```shell
Gradio Environment Information:
------------------------------
Operating System: Darwin
gradio version: 4.8.0
gradio_client version: 0.7.1
------------------------------------------------
gradio dependencies in your environment:
aiofiles: 23.2.1
altair: 4.2.0
fastapi: 0.104.1
ffmpy: 0.3.1
gradio-client==0.7.1 is not installed.
httpx: 0.23.0
huggingface-hub: 0.19.4
importlib-resources: 6.0.1
jinja2: 3.1.2
markupsafe: 2.1.1
matplotlib: 3.7.2
numpy: 1.26.1
orjson: 3.9.4
packaging: 22.0
pandas: 1.5.3
pillow: 9.2.0
pydantic: 2.5.2
pydub: 0.25.1
python-multipart: 0.0.6
pyyaml: 6.0
semantic-version: 2.10.0
tomlkit==0.12.0 is not installed.
typer: 0.9.0
typing-extensions: 4.8.0
uvicorn: 0.23.2
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.
gradio_client dependencies in your environment:
fsspec: 2023.6.0
httpx: 0.23.0
huggingface-hub: 0.19.4
packaging: 22.0
typing-extensions: 4.8.0
websockets: 11.0.3
```
### Severity
I can work around it
| 2024-04-25T16:57:34 |
||
ludwig-ai/ludwig | 212 | ludwig-ai__ludwig-212 | [
"211"
] | c0361aee48b994083715d0b4c874d27773dd99b1 | diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py
--- a/ludwig/features/base_feature.py
+++ b/ludwig/features/base_feature.py
@@ -18,7 +18,7 @@
import tensorflow as tf
from ludwig.models.modules.fully_connected_modules import FCStack
-from ludwig.models.modules.recurrent_modules import reduce_sequence
+from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.utils.misc import merge_dict
from ludwig.utils.tf_utils import sequence_length_3D
diff --git a/ludwig/models/combiners.py b/ludwig/models/combiners.py
--- a/ludwig/models/combiners.py
+++ b/ludwig/models/combiners.py
@@ -20,7 +20,7 @@
from ludwig.features.feature_utils import SEQUENCE_TYPES
from ludwig.models.modules.fully_connected_modules import FCStack
-from ludwig.models.modules.recurrent_modules import reduce_sequence
+from ludwig.models.modules.reduction_modules import reduce_sequence
from ludwig.models.modules.sequence_encoders import CNNRNN
from ludwig.models.modules.sequence_encoders import ParallelCNN
from ludwig.models.modules.sequence_encoders import RNN
diff --git a/ludwig/models/modules/sequence_encoders.py b/ludwig/models/modules/sequence_encoders.py
--- a/ludwig/models/modules/sequence_encoders.py
+++ b/ludwig/models/modules/sequence_encoders.py
@@ -23,7 +23,7 @@
from ludwig.models.modules.embedding_modules import EmbedSequence
from ludwig.models.modules.fully_connected_modules import FCStack
from ludwig.models.modules.recurrent_modules import RecurrentStack
-from ludwig.models.modules.recurrent_modules import reduce_sequence
+from ludwig.models.modules.reduction_modules import reduce_sequence
class PassthroughEncoder:
| Bad import in base_feature.py
Minor issue:
There is a bad import in [base_feature.py](https://github.com/uber/ludwig/blob/master/ludwig/features/base_feature.py#L21). The reduce_sequence function is actually in the [reduction_modules](https://github.com/uber/ludwig/blob/master/ludwig/models/modules/reduction_modules.py#L79) not recurrent_modules.
I can have a PR to fix this.
| 2019-03-13T21:47:29 |
||
ludwig-ai/ludwig | 424 | ludwig-ai__ludwig-424 | [
"386"
] | 9c93622cb60ca89becbfd8d8110ee1a74d491d7f | diff --git a/ludwig/data/preprocessing.py b/ludwig/data/preprocessing.py
--- a/ludwig/data/preprocessing.py
+++ b/ludwig/data/preprocessing.py
@@ -401,6 +401,7 @@ def preprocess_for_training(
data_train_csv,
data_validation_csv,
data_test_csv,
+ train_set_metadata_json,
skip_save_processed_input,
preprocessing_params,
random_seed
@@ -496,6 +497,7 @@ def _preprocess_csv_for_training(
data_train_csv=None,
data_validation_csv=None,
data_test_csv=None,
+ train_set_metadata_json=None,
skip_save_processed_input=False,
preprocessing_params=default_preprocessing_parameters,
random_seed=default_random_seed
@@ -507,12 +509,17 @@ def _preprocess_csv_for_training(
:param data_train_csv: training csv data
:param data_validation_csv: validation csv data
:param data_test_csv: test csv data
+ :param train_set_metadata_json: train set metadata json
:param skip_save_processed_input: if False, the pre-processed data is saved
as .hdf5 files in the same location as the csvs with the same names.
:param preprocessing_params: preprocessing parameters
:param random_seed: random seed
:return: training, test, validation datasets, training metadata
"""
+ train_set_metadata = None
+ if train_set_metadata_json is not None:
+ train_set_metadata = load_metadata(train_set_metadata_json)
+
if data_csv is not None:
# Use data and ignore _train, _validation and _test.
# Also ignore data and train set metadata needs preprocessing
@@ -525,6 +532,7 @@ def _preprocess_csv_for_training(
data_csv,
features,
preprocessing_params,
+ train_set_metadata=train_set_metadata,
random_seed=random_seed
)
if not skip_save_processed_input:
@@ -564,6 +572,7 @@ def _preprocess_csv_for_training(
concatenated_df,
features,
preprocessing_params,
+ train_set_metadata=train_set_metadata,
random_seed=random_seed
)
training_set, test_set, validation_set = split_dataset_tvt(
@@ -610,6 +619,7 @@ def _preprocess_df_for_training(
data_train_df=None,
data_validation_df=None,
data_test_df=None,
+ train_set_metadata_json=None,
preprocessing_params=default_preprocessing_parameters,
random_seed=default_random_seed
):
@@ -617,6 +627,9 @@ def _preprocess_df_for_training(
processed data as hdf5 as we don't expect users to do this as the data can
be processed in memory
"""
+ train_set_metadata = None
+ if train_set_metadata_json is not None:
+ train_set_metadata = load_metadata(train_set_metadata_json)
if data_df is not None:
# needs preprocessing
@@ -637,6 +650,7 @@ def _preprocess_df_for_training(
data_df,
features,
preprocessing_params,
+ train_set_metadata=train_set_metadata,
random_seed=random_seed
)
training_set, test_set, validation_set = split_dataset_tvt(
diff --git a/ludwig/models/model.py b/ludwig/models/model.py
--- a/ludwig/models/model.py
+++ b/ludwig/models/model.py
@@ -412,6 +412,9 @@ def train(
# ====== Setup session =======
session = self.initialize_session(gpus, gpu_fraction)
+ if self.weights_save_path:
+ self.restore(session, self.weights_save_path)
+
train_writer = None
if is_on_master():
if not skip_save_log:
diff --git a/ludwig/train.py b/ludwig/train.py
--- a/ludwig/train.py
+++ b/ludwig/train.py
@@ -206,6 +206,13 @@ def full_train(
else:
experiment_dir_name = '/'
+ # if model_load_path is not None, load its train_set_metadata
+ if model_load_path is not None:
+ train_set_metadata_json = os.path.join(
+ model_load_path,
+ TRAIN_SET_METADATA_FILE_NAME
+ )
+
description_fn, training_stats_fn, model_dir = get_file_names(
experiment_dir_name
)
| Cannot feed value of shape for Tensor with --model_load_path for NER
**Describe the bug**
I am trying to use pretrained model weights for initialization for building NER models using --model_load_path (defines the path of my pretrained model). Pretrained model built with 7K records. I was trying to build another NER model and uses pretrained model weights for initialization with 1.5 K records. But I am getting Cannot feed value of shape for Tensor error while building model with below command. I am using CONLL data for training
**Command:**
ludwig experiment --data_train_csv training/training_1.csv --data_validation_csv validation/validation_1.csv --data_test_csv testing/testing_1.csv --model_load_path results/experiment_run_0/model --model_definition_file model_definition/model_definition.yaml
**Error**
File "D:\Development_Avecto\Anaconda3\Scripts\ludwig-script.py", line 11, in <module>
load_entry_point('ludwig==0.1.2', 'console_scripts', 'ludwig')()
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\cli.py", line 94, in main
CLI()
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\cli.py", line 60, in __init__
getattr(self, args.command)()
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\cli.py", line 65, in experiment
experiment.cli(sys.argv[2:])
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\experiment.py", line 472, in cli
experiment(**vars(args))
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\experiment.py", line 200, in experiment
**kwargs
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\train.py", line 301, in full_train
debug=debug
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\train.py", line 461, in train
**model_definition['training']
File "D:\Development_Avecto\Anaconda3\lib\site-packages\ludwig\models\model.py", line 523, in train
is_training=True
File "D:\Development_Avecto\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 929, in run
run_metadata_ptr)
File "D:\Development_Avecto\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (128, 60) for Tensor 'Tagged_Entities/Tagged_Entities_placeholder:0', which has shape '(?, 113)'
**Expected behavior**
I wanted to create the NER model using prebuilt model weights as initialization.
**Environment (please complete the following information):**
- OS: Windows
- Version: Windows10
- Python version : 3.6.5
- Ludwig version : 0.1.2
| @priyaJagan90 thank you for this report. My best guess so far is that the original dataset and the new one have different parameters for `max_sequence_length`.
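To make that guess concrete with the numbers from the traceback (a toy NumPy sketch, not Ludwig's preprocessing code; 113 is the placeholder width from the loaded model's metadata, 60 the width the new CSVs were padded to on their own):

```python
import numpy as np

original_max_len = 113                       # from the first run's train_set_metadata
batch = np.zeros((128, 60), dtype=np.int32)  # new data padded only to its own max length

# Re-padding to the saved max length makes the batch match the (?, 113) placeholder.
padded = np.zeros((batch.shape[0], original_max_len), dtype=np.int32)
padded[:, : batch.shape[1]] = batch
assert padded.shape == (128, 113)
```

This is also why the patch above threads the train_set_metadata.json found under model_load_path into preprocessing: reusing the saved metadata keeps the shapes consistent with the loaded model.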
Could you please help us replicate the error so that we can debug it?
Ideally we would need:
- yaml definition of the original model
- original data (most likely a subsample, even obfuscated or with synthetic data may work, as long as it leads to the same error)
- command for the second training importing the previous model
- the second dataset (again subsampled, obfuscated or synthesized, as long as the error happens).
Hi,
Thanks for your response. I have attached a subsample of the original dataset and the dataset for the second run.
[Data_subset.zip](https://github.com/uber/ludwig/files/3349713/Data_subset.zip)
| 2019-07-13T19:20:01 |
|
ludwig-ai/ludwig | 531 | ludwig-ai__ludwig-531 | [
"520"
] | a594ab5fd93337cbae3bdc4b8719509c1739e129 | diff --git a/ludwig/utils/visualization_utils.py b/ludwig/utils/visualization_utils.py
--- a/ludwig/utils/visualization_utils.py
+++ b/ludwig/utils/visualization_utils.py
@@ -624,7 +624,7 @@ def confidence_fitlering_3d_plot(
thresholds_2,
accuracies,
dataset_kepts,
- threshold_fields=None,
+ threshold_output_feature_names=None,
title=None,
filename=None
):
@@ -651,8 +651,8 @@ def confidence_fitlering_3d_plot(
ax.grid(which='minor', alpha=0.5)
ax.grid(which='major', alpha=0.75)
- ax.set_xlabel('{} probability'.format(threshold_fields[0]))
- ax.set_ylabel('{} probability'.format(threshold_fields[1]))
+ ax.set_xlabel('{} probability'.format(threshold_output_feature_names[0]))
+ ax.set_ylabel('{} probability'.format(threshold_output_feature_names[1]))
ax.set_xlim(np.min(thresholds_1), np.max(thresholds_1))
ax.set_ylim(np.min(thresholds_2), np.max(thresholds_2))
@@ -938,7 +938,7 @@ def predictions_distribution_plot(
def confusion_matrix_plot(
confusion_matrix,
labels=None,
- field=None,
+ output_feature_name=None,
filename=None
):
mpl.rcParams.update({'figure.autolayout': True})
@@ -957,8 +957,8 @@ def confusion_matrix_plot(
ax.grid(False)
ax.tick_params(axis='both', which='both', length=0)
fig.colorbar(cax, ax=ax, extend='max')
- ax.set_xlabel('Predicted {}'.format(field))
- ax.set_ylabel('Actual {}'.format(field))
+ ax.set_xlabel('Predicted {}'.format(output_feature_name))
+ ax.set_ylabel('Actual {}'.format(output_feature_name))
plt.tight_layout()
ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
diff --git a/ludwig/visualize.py b/ludwig/visualize.py
--- a/ludwig/visualize.py
+++ b/ludwig/visualize.py
@@ -40,17 +40,17 @@
def validate_conf_treshholds_and_probabilities_2d_3d(
- probabilities, treshhold_fields
+ probabilities, treshhold_output_feature_names
):
- """Ensure probabilities and treshhold fields arrays have two members each.
+ """Ensure probabilities and treshhold output_feature_names arrays have two members each.
:param probabilities: List of probabilities per model
- :param threshhold_fields: List of threshhold fields per model
+ :param threshhold_output_feature_names: List of threshhold output_feature_names per model
:raise: RuntimeError
"""
validation_mapping = {
'probabilities': probabilities,
- 'treshhold_fields': treshhold_fields
+ 'treshhold_output_feature_names': treshhold_output_feature_names
}
for item, value in validation_mapping.items():
item_len = len(value)
@@ -101,47 +101,53 @@ def convert_to_list(item):
return item if item is None or isinstance(item, list) else [item]
-def validate_visualization_prediction_field_from_train_stats(
- field,
+def _validate_output_feature_name_from_train_stats(
+ output_feature_name,
train_stats_per_model
):
- """Validate prediction field from model train stats and return it as list.
+ """Validate prediction output_feature_name from model train stats and return it as list.
- :param field: field containing ground truth
+ :param output_feature_name: output_feature_name containing ground truth
:param train_stats_per_model: list of per model train stats
- :return fields: list of field(s) containing ground truth
+ :return output_feature_names: list of output_feature_name(s) containing ground truth
"""
- fields_set = set()
+ output_feature_names_set = set()
for ls in train_stats_per_model:
for _, values in ls.items():
for key in values:
- fields_set.add(key)
+ output_feature_names_set.add(key)
try:
- return [field] if field in fields_set else fields_set
- # raised if field is emtpy iterable (e.g. [] in set())
+ if output_feature_name in output_feature_names_set:
+ return [output_feature_name]
+ else:
+ return output_feature_names_set
+ # raised if output_feature_name is emtpy iterable (e.g. [] in set())
except TypeError:
- return fields_set
+ return output_feature_names_set
-def validate_visualization_prediction_field_from_test_stats(
- field,
+def _validate_output_feature_name_from_test_stats(
+ output_feature_name,
test_stats_per_model
):
- """Validate prediction field from model test stats and return it as list.
+ """Validate prediction output_feature_name from model test stats and return it as list.
- :param field: field containing ground truth
+ :param output_feature_name: output_feature_name containing ground truth
:param test_stats_per_model: list of per model test stats
- :return fields: list of field(s) containing ground truth
+ :return output_feature_names: list of output_feature_name(s) containing ground truth
"""
- fields_set = set()
+ output_feature_names_set = set()
for ls in test_stats_per_model:
for key in ls:
- fields_set.add(key)
+ output_feature_names_set.add(key)
try:
- return [field] if field in fields_set else fields_set
- # raised if field is emtpy iterable (e.g. [] in set())
+ if output_feature_name in output_feature_names_set:
+ return [output_feature_name]
+ else:
+ return output_feature_names_set
+ # raised if output_feature_name is emtpy iterable (e.g. [] in set())
except TypeError:
- return fields_set
+ return output_feature_names_set
def generate_filename_template_path(output_dir, filename_template):
@@ -186,7 +192,7 @@ def compare_classifiers_performance_from_prob_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_from_prob.
@@ -194,11 +200,11 @@ def compare_classifiers_performance_from_prob_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -212,7 +218,7 @@ def compare_classifiers_performance_from_pred_cli(
ground_truth,
ground_truth_metadata,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_from_pred
@@ -221,11 +227,11 @@ def compare_classifiers_performance_from_pred_cli(
:param ground_truth: Path to ground truth file
:param ground_truth_metadata: Path to ground truth metadata file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
metadata = load_json(ground_truth_metadata)
predictions_per_model_raw = load_data_for_viz(
'load_from_file', predictions, dtype=str
@@ -234,7 +240,7 @@ def compare_classifiers_performance_from_pred_cli(
np.ndarray.flatten(pred) for pred in predictions_per_model_raw
]
compare_classifiers_performance_from_pred(
- predictions_per_model, gt, metadata, field, **kwargs
+ predictions_per_model, gt, metadata, output_feature_name, **kwargs
)
@@ -242,7 +248,7 @@ def compare_classifiers_performance_subset_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_subset.
@@ -250,11 +256,11 @@ def compare_classifiers_performance_subset_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -267,7 +273,7 @@ def compare_classifiers_performance_changing_k_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_changing_k.
@@ -275,11 +281,11 @@ def compare_classifiers_performance_changing_k_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -310,7 +316,7 @@ def compare_classifiers_predictions_cli(
predictions,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_predictions
@@ -318,11 +324,11 @@ def compare_classifiers_predictions_cli(
:param predictions: Path to experiment predictions file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
predictions_per_model = load_data_for_viz(
'load_from_file', predictions, dtype=str
)
@@ -333,7 +339,7 @@ def compare_classifiers_predictions_distribution_cli(
predictions,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by
@@ -342,11 +348,11 @@ def compare_classifiers_predictions_distribution_cli(
:param predictions: Path to experiment predictions file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
predictions_per_model = load_data_for_viz(
'load_from_file', predictions, dtype=str
)
@@ -359,7 +365,7 @@ def confidence_thresholding_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by confidence_thresholding.
@@ -367,11 +373,11 @@ def confidence_thresholding_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -384,7 +390,7 @@ def confidence_thresholding_data_vs_acc_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by
@@ -393,11 +399,11 @@ def confidence_thresholding_data_vs_acc_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -410,7 +416,7 @@ def confidence_thresholding_data_vs_acc_subset_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by
@@ -419,11 +425,11 @@ def confidence_thresholding_data_vs_acc_subset_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -437,7 +443,7 @@ def confidence_thresholding_data_vs_acc_subset_per_class_cli(
ground_truth,
ground_truth_metadata,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by compare_classifiers_multiclass
@@ -446,17 +452,17 @@ def confidence_thresholding_data_vs_acc_subset_per_class_cli(
:param ground_truth: Path to ground truth file
:param ground_truth_metadata: Path to ground truth metadata file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
metadata = load_json(ground_truth_metadata)
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
confidence_thresholding_data_vs_acc_subset_per_class(
- probabilities_per_model, gt, metadata, field, **kwargs
+ probabilities_per_model, gt, metadata, output_feature_name, **kwargs
)
@@ -464,7 +470,7 @@ def confidence_thresholding_2thresholds_2d_cli(
probabilities,
ground_truth,
ground_truth_split,
- threshold_fields,
+ threshold_output_feature_names,
**kwargs
):
"""Load model data from files to be shown by
@@ -473,25 +479,25 @@ def confidence_thresholding_2thresholds_2d_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param threshold_fields: Target prediction fields
+ :param threshold_output_feature_names: Name of the output feature to visualizes
:param kwargs: model configuration arguments
:return None:
"""
gt1 = load_from_file(
ground_truth,
- threshold_fields[0],
+ threshold_output_feature_names[0],
ground_truth_split
)
gt2 = load_from_file(
ground_truth,
- threshold_fields[1],
+ threshold_output_feature_names[1],
ground_truth_split
)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
confidence_thresholding_2thresholds_2d(
- probabilities_per_model, [gt1, gt2], threshold_fields, **kwargs
+ probabilities_per_model, [gt1, gt2], threshold_output_feature_names, **kwargs
)
@@ -499,7 +505,7 @@ def confidence_thresholding_2thresholds_3d_cli(
probabilities,
ground_truth,
ground_truth_split,
- threshold_fields,
+ threshold_output_feature_names,
**kwargs
):
"""Load model data from files to be shown by
@@ -508,25 +514,25 @@ def confidence_thresholding_2thresholds_3d_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param threshold_fields: Target prediction fields
+ :param threshold_output_feature_names: Names of the output features to visualize
:param kwargs: model configuration arguments
:return None:
"""
gt1 = load_from_file(
ground_truth,
- threshold_fields[0],
+ threshold_output_feature_names[0],
ground_truth_split
)
gt2 = load_from_file(
ground_truth,
- threshold_fields[1],
+ threshold_output_feature_names[1],
ground_truth_split
)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
confidence_thresholding_2thresholds_3d(
- probabilities_per_model, [gt1, gt2], threshold_fields, **kwargs
+ probabilities_per_model, [gt1, gt2], threshold_output_feature_names, **kwargs
)
@@ -534,7 +540,7 @@ def binary_threshold_vs_metric_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by binary_threshold_vs_metric_cli.
@@ -542,11 +548,11 @@ def binary_threshold_vs_metric_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -559,7 +565,7 @@ def roc_curves_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by roc_curves_cli.
@@ -567,11 +573,11 @@ def roc_curves_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -596,7 +602,7 @@ def calibration_1_vs_all_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by calibration_1_vs_all_cli.
@@ -604,11 +610,11 @@ def calibration_1_vs_all_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -619,7 +625,7 @@ def calibration_multiclass_cli(
probabilities,
ground_truth,
ground_truth_split,
- field,
+ output_feature_name,
**kwargs
):
"""Load model data from files to be shown by calibration_multiclass_cli.
@@ -627,11 +633,11 @@ def calibration_multiclass_cli(
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
- :param field: Target prediction field
+ :param output_feature_name: Name of the output feature to visualize
:param kwargs: model configuration arguments
:return None:
"""
- gt = load_from_file(ground_truth, field, ground_truth_split)
+ gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
probabilities_per_model = load_data_for_viz(
'load_from_file', probabilities, dtype=float
)
@@ -666,7 +672,7 @@ def frequency_vs_f1_cli(test_statistics, ground_truth_metadata, **kwargs):
def learning_curves(
train_stats_per_model,
- field,
+ output_feature_name,
model_names=None,
output_directory=None,
file_format='pdf',
@@ -678,7 +684,8 @@ def learning_curves(
it produces a line plot showing how that measure changed over the course
of the epochs of training on the training and validation sets.
:param train_stats_per_model: List containing train statistics per model
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted
+ and for which is provided ground truth
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
If not specified, plots will be displayed in a window
@@ -692,20 +699,20 @@ def learning_curves(
)
train_stats_per_model_list = convert_to_list(train_stats_per_model)
model_names_list = convert_to_list(model_names)
- fields = validate_visualization_prediction_field_from_train_stats(
- field,
+ output_feature_names = _validate_output_feature_name_from_train_stats(
+ output_feature_name,
train_stats_per_model_list
)
metrics = [LOSS, ACCURACY, HITS_AT_K, EDIT_DISTANCE]
- for field in fields:
+ for output_feature_name in output_feature_names:
for metric in metrics:
- if metric in train_stats_per_model_list[0]['train'][field]:
+ if metric in train_stats_per_model_list[0]['train'][output_feature_name]:
filename = None
if filename_template_path:
- filename = filename_template_path.format(field, metric)
+ filename = filename_template_path.format(output_feature_name, metric)
- training_stats = [learning_stats['train'][field][metric]
+ training_stats = [learning_stats['train'][output_feature_name][metric]
for learning_stats in
train_stats_per_model_list]
@@ -713,7 +720,7 @@ def learning_curves(
for learning_stats in train_stats_per_model_list:
if 'validation' in learning_stats:
validation_stats.append(
- learning_stats['validation'][field][metric]
+ learning_stats['validation'][output_feature_name][metric]
)
else:
validation_stats.append(None)
@@ -723,14 +730,14 @@ def learning_curves(
validation_stats,
metric,
model_names_list,
- title='Learning Curves {}'.format(field),
+ title='Learning Curves {}'.format(output_feature_name),
filename=filename
)
def compare_performance(
test_stats_per_model,
- field, model_names=None,
+ output_feature_name, model_names=None,
output_directory=None,
file_format='pdf',
**kwargs
@@ -740,9 +747,9 @@ def compare_performance(
For each model (in the aligned lists of test_statistics and model_names)
it produces bars in a bar plot, one for each overall metric available
- in the test_statistics file for the specified field.
+ in the test_statistics file for the specified output_feature_name.
:param test_stats_per_model: List containing train statistics per model
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
If not specified, plots will be displayed in a window
@@ -757,24 +764,24 @@ def compare_performance(
test_stats_per_model_list = convert_to_list(test_stats_per_model)
model_names_list = convert_to_list(model_names)
- fields = validate_visualization_prediction_field_from_test_stats(
- field,
+ output_feature_names = _validate_output_feature_name_from_test_stats(
+ output_feature_name,
test_stats_per_model_list
)
- for field in fields:
+ for output_feature_name in output_feature_names:
accuracies = []
hits_at_ks = []
edit_distances = []
for test_stats_per_model in test_stats_per_model_list:
- if ACCURACY in test_stats_per_model[field]:
- accuracies.append(test_stats_per_model[field][ACCURACY])
- if HITS_AT_K in test_stats_per_model[field]:
- hits_at_ks.append(test_stats_per_model[field][HITS_AT_K])
- if EDIT_DISTANCE in test_stats_per_model[field]:
+ if ACCURACY in test_stats_per_model[output_feature_name]:
+ accuracies.append(test_stats_per_model[output_feature_name][ACCURACY])
+ if HITS_AT_K in test_stats_per_model[output_feature_name]:
+ hits_at_ks.append(test_stats_per_model[output_feature_name][HITS_AT_K])
+ if EDIT_DISTANCE in test_stats_per_model[output_feature_name]:
edit_distances.append(
- test_stats_per_model[field][EDIT_DISTANCE])
+ test_stats_per_model[output_feature_name][EDIT_DISTANCE])
measures = []
measures_names = []
@@ -791,14 +798,14 @@ def compare_performance(
filename = None
if filename_template_path:
- filename = filename_template_path.format(field)
+ filename = filename_template_path.format(output_feature_name)
os.makedirs(output_directory, exist_ok=True)
visualization_utils.compare_classifiers_plot(
measures,
measures_names,
model_names_list,
- title='Performance comparison on {}'.format(field),
+ title='Performance comparison on {}'.format(output_feature_name),
filename=filename
)
@@ -817,10 +824,9 @@ def compare_classifiers_performance_from_prob(
For each model it produces bars in a bar plot, one for each overall metric
computed on the fly from the probabilities of predictions for the specified
- field.
+ output_feature_name.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param top_n_classes: List containing the number of classes to plot
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
@@ -887,7 +893,7 @@ def compare_classifiers_performance_from_pred(
predictions_per_model,
ground_truth,
metadata,
- field,
+ output_feature_name,
labels_limit,
model_names=None,
output_directory=None,
@@ -897,13 +903,12 @@ def compare_classifiers_performance_from_pred(
"""Produces model comparision barplot visualization from predictions.
For each model it produces bars in a bar plot, one for each overall metric
- computed on the fly from the predictions for the specified field.
+ computed on the fly from the predictions for the specified output_feature_name.
:param predictions_per_model: List containing the model predictions
- for the specified field
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ for the specified output_feature_name
+ :param ground_truth: NumPy Array containing ground truth data
:param metadata: Model's input metadata
- :param field: field containing ground truth
+ :param output_feature_name: output_feature_name containing ground truth
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -920,7 +925,7 @@ def compare_classifiers_performance_from_pred(
mapped_preds = []
try:
for pred in preds:
- mapped_preds.append([metadata[field]['str2idx'][val] for val in
+ mapped_preds.append([metadata[output_feature_name]['str2idx'][val] for val in
pred])
preds = mapped_preds
# If predictions are coming from npy file there is no need to convert to
@@ -979,12 +984,11 @@ def compare_classifiers_performance_subset(
For each model it produces bars in a bar plot, one for each overall metric
computed on the fly from the probabilities predictions for the
- specified field, considering only a subset of the full training set.
+ specified output_feature_name, considering only a subset of the full training set.
The way the subset is obtained is using the top_n_classes and
subset parameters.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param top_n_classes: List containing the number of classes to plot
:param labels_limit: Maximum numbers of labels.
:param subset: Type of the subset filtering
@@ -1091,11 +1095,10 @@ def compare_classifiers_performance_changing_k(
For each model it produces a line plot that shows the Hits@K measure
(that counts a prediction as correct if the model produces it among the
- first k) while changing k from 1 to top_k for the specified field.
+ first k) while changing k from 1 to top_k for the specified output_feature_name.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
- param top_k: Number of elements in the ranklist to consider
+ :param ground_truth: NumPy Array containing ground truth data
+ :param top_k: Number of elements in the ranklist to consider
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -1146,20 +1149,20 @@ def compare_classifiers_performance_changing_k(
def compare_classifiers_multiclass_multimetric(
test_stats_per_model,
metadata,
- field,
+ output_feature_name,
top_n_classes,
model_names=None,
output_directory=None,
file_format='pdf',
**kwargs
):
- """Show the precision, recall and F1 of the model for the specified field.
+ """Show the precision, recall and F1 of the model for the specified output_feature_name.
For each model it produces four plots that show the precision,
- recall and F1 of the model on several classes for the specified field.
+ recall and F1 of the model on several classes for the specified output_feature_name.
:param test_stats_per_model: List containing train statistics per model
:param metadata: Model's input metadata
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted and for which ground truth is provided
:param top_n_classes: List containing the number of classes to plot
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
@@ -1177,32 +1180,32 @@ def compare_classifiers_multiclass_multimetric(
test_stats_per_model_list = convert_to_list(test_stats_per_model)
model_names_list = convert_to_list(model_names)
- fields = validate_visualization_prediction_field_from_test_stats(
- field,
+ output_feature_names = _validate_output_feature_name_from_test_stats(
+ output_feature_name,
test_stats_per_model_list
)
for i, test_statistics in enumerate(
test_stats_per_model_list):
- for field in fields:
+ for output_feature_name in output_feature_names:
model_name_name = (
model_names_list[i]
if model_names_list is not None and i < len(model_names_list)
else ''
)
- if 'per_class_stats' not in test_statistics[field]:
+ if 'per_class_stats' not in test_statistics[output_feature_name]:
logging.warning(
- 'The field {} in test statistics does not contain "{}", '
- 'skipping it'.format(field, per_class_stats)
+ 'The output_feature_name {} in test statistics does not contain "{}", '
+ 'skipping it'.format(output_feature_name, 'per_class_stats')
)
break
- per_class_stats = test_statistics[field]['per_class_stats']
+ per_class_stats = test_statistics[output_feature_name]['per_class_stats']
precisions = []
recalls = []
f1_scores = []
labels = []
for _, class_name in sorted(
- [(metadata[field]['str2idx'][key], key)
+ [(metadata[output_feature_name]['str2idx'][key], key)
for key in per_class_stats.keys()],
key=lambda tup: tup[0]):
class_stats = per_class_stats[class_name]
@@ -1221,7 +1224,7 @@ def compare_classifiers_multiclass_multimetric(
if filename_template_path:
os.makedirs(output_directory, exist_ok=True)
filename = filename_template_path.format(
- model_name_name, field, 'top{}'.format(k)
+ model_name_name, output_feature_name, 'top{}'.format(k)
)
visualization_utils.compare_classifiers_multiclass_multimetric_plot(
@@ -1230,7 +1233,7 @@ def compare_classifiers_multiclass_multimetric(
labels=ls,
title='{} Multiclass Precision / Recall / '
'F1 Score top {} {}'.format(model_name_name, k,
- field),
+ output_feature_name),
filename=filename
)
@@ -1245,7 +1248,7 @@ def compare_classifiers_multiclass_multimetric(
if filename_template_path:
os.makedirs(output_directory, exist_ok=True)
filename = filename_template_path.format(
- model_name_name, field, 'best{}'.format(k)
+ model_name_name, output_feature_name, 'best{}'.format(k)
)
visualization_utils.compare_classifiers_multiclass_multimetric_plot(
[p_np[higher_f1s],
@@ -1255,14 +1258,14 @@ def compare_classifiers_multiclass_multimetric(
labels=labels_np[higher_f1s].tolist(),
title='{} Multiclass Precision / Recall / '
'F1 Score best {} classes {}'.format(
- model_name_name, k, field),
+ model_name_name, k, output_feature_name),
filename=filename
)
lower_f1s = sorted_indices[:k]
filename = None
if filename_template_path:
filename = filename_template_path.format(
- model_name_name, field, 'worst{}'.format(k)
+ model_name_name, output_feature_name, 'worst{}'.format(k)
)
visualization_utils.compare_classifiers_multiclass_multimetric_plot(
[p_np[lower_f1s],
@@ -1271,14 +1274,14 @@ def compare_classifiers_multiclass_multimetric(
['precision', 'recall', 'f1 score'],
labels=labels_np[lower_f1s].tolist(),
title='{} Multiclass Precision / Recall / F1 Score worst '
- 'k classes {}'.format(model_name_name, k, field),
+ '{} classes {}'.format(model_name_name, k, output_feature_name),
filename=filename
)
filename = None
if filename_template_path:
filename = filename_template_path.format(
- model_name_name, field, 'sorted'
+ model_name_name, output_feature_name, 'sorted'
)
visualization_utils.compare_classifiers_multiclass_multimetric_plot(
[p_np[sorted_indices[::-1]],
@@ -1287,26 +1290,26 @@ def compare_classifiers_multiclass_multimetric(
['precision', 'recall', 'f1 score'],
labels=labels_np[sorted_indices[::-1]].tolist(),
title='{} Multiclass Precision / Recall / F1 Score '
- '{} sorted'.format(model_name_name, field),
+ '{} sorted'.format(model_name_name, output_feature_name),
filename=filename
)
logging.info('\n')
logging.info(model_name_name)
- tmp_str = '{0} best 5 classes: '.format(field)
+ tmp_str = '{0} best 5 classes: '.format(output_feature_name)
tmp_str += '{}'
logging.info(tmp_str.format(higher_f1s))
logging.info(f1_np[higher_f1s])
- tmp_str = '{0} worst 5 classes: '.format(field)
+ tmp_str = '{0} worst 5 classes: '.format(output_feature_name)
tmp_str += '{}'
logging.info(tmp_str.format(lower_f1s))
logging.info(f1_np[lower_f1s])
tmp_str = '{0} number of classes with f1 score > 0: '.format(
- field)
+ output_feature_name)
tmp_str += '{}'
logging.info(tmp_str.format(np.sum(f1_np > 0)))
tmp_str = '{0} number of classes with f1 score = 0: '.format(
- field)
+ output_feature_name)
tmp_str += '{}'
logging.info(tmp_str.format(np.sum(f1_np == 0)))
@@ -1320,11 +1323,10 @@ def compare_classifiers_predictions(
file_format='pdf',
**kwargs
):
- """Show two models comparision of their field predictions.
+ """Show two models comparision of their output_feature_name predictions.
:param predictions_per_model: List containing the model predictions
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -1450,13 +1452,12 @@ def compare_classifiers_predictions_distribution(
file_format='pdf',
**kwargs
):
- """Show comparision of models predictions distribution for 10 field classes
+ """Show comparision of models predictions distribution for 10 output_feature_name classes
This visualization produces a radar plot comparing the distributions of
- predictions of the models for the first 10 classes of the specified field.
+ predictions of the models for the first 10 classes of the specified output_feature_name.
:param predictions_per_model: List containing the model predictions
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -1515,10 +1516,9 @@ def confidence_thresholding(
For each model it produces a pair of lines indicating the accuracy of
the model and the data coverage while increasing a threshold (x axis) on
- the probabilities of predictions for the specified field.
+ the probabilities of predictions for the specified output_feature_name.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -1596,13 +1596,12 @@ def confidence_thresholding_data_vs_acc(
For each model it produces a line indicating the accuracy of the model
and the data coverage while increasing a threshold on the probabilities
- of predictions for the specified field. The difference with
+ of predictions for the specified output_feature_name. The difference with
confidence_thresholding is that it uses two axes instead of three,
not visualizing the threshold and having coverage as x axis instead of
the threshold.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -1680,7 +1679,7 @@ def confidence_thresholding_data_vs_acc_subset(
For each model it produces a line indicating the accuracy of the model
and the data coverage while increasing a threshold on the probabilities
- of predictions for the specified field, considering only a subset of the
+ of predictions for the specified output_feature_name, considering only a subset of the
full training set. The way the subset is obtained is using the top_n_classes
and subset parameters.
The difference with confidence_thresholding is that it uses two axes
@@ -1696,8 +1695,7 @@ def confidence_thresholding_data_vs_acc_subset(
and the percentage of datapoints that have been kept from the original set
will be displayed for each model.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param top_n_classes: List containing the number of classes to plot
:param labels_limit: Maximum numbers of labels.
:param subset: Type of the subset filtering
@@ -1788,7 +1786,7 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
probabilities_per_model,
ground_truth,
metadata,
- field,
+ output_feature_name,
top_n_classes,
labels_limit,
subset,
@@ -1803,7 +1801,7 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
For each model (in the aligned lists of probabilities and model_names)
it produces a line indicating the accuracy of the model and the data
coverage while increasing a threshold on the probabilities of
- predictions for the specified field, considering only a subset of the
+ predictions for the specified output_feature_name, considering only a subset of the
full training set. The way the subset is obtained is using the
top_n_classes and subset parameters. The difference with
confidence_thresholding is that it uses two axes instead of three,
@@ -1822,8 +1820,7 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
The difference with confidence_thresholding_data_vs_acc_subset is that it
produces one plot per class within the top_n_classes.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param metadata: Model's input metadata
:param top_n_classes: List containing the number of classes to plot
:param labels_limit: Maximum numbers of labels.
@@ -1902,18 +1899,18 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
accuracies.append(accuracies_alg)
dataset_kept.append(dataset_kept_alg)
- field_name = metadata[field]['idx2str'][curr_k]
+ output_feature_name_name = metadata[output_feature_name]['idx2str'][curr_k]
filename = None
if filename_template_path:
os.makedirs(output_directory, exist_ok=True)
- filename = filename_template_path.format(field_name)
+ filename = filename_template_path.format(output_feature_name_name)
visualization_utils.confidence_fitlering_data_vs_acc_plot(
accuracies, dataset_kept, model_names_list,
decimal_digits=2,
title='Confidence_Thresholding (Data vs Accuracy) '
- 'for class {}'.format(field_name),
+ 'for class {}'.format(output_feature_name_name),
filename=filename
)
@@ -1921,26 +1918,24 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
def confidence_thresholding_2thresholds_2d(
probabilities_per_model,
ground_truths,
- threshold_fields,
+ threshold_output_feature_names,
labels_limit,
model_names=None,
output_directory=None,
file_format='pdf',
**kwargs
):
- """Show confidence trethreshold data vs accuracy for two field thresholds
+ """Show confidence trethreshold data vs accuracy for two output_feature_name thresholds
The first plot shows several semi transparent lines. They summarize the
3d surfaces displayed by confidence_thresholding_2thresholds_3d that have
thresholds on the confidence of the predictions of the two
- threshold_fields as x and y axes and either the data coverage percentage or
+ threshold_output_feature_names as x and y axes and either the data coverage percentage or
the accuracy as z axis. Each line represents a slice of the data
coverage surface projected onto the accuracy surface.
:param probabilities_per_model: List of model probabilities
- :param ground_truths: List of NumPy Arrays containing computed model ground
- truth data for target prediction fields based on the model
- metadata
- :param threshold_fields: List of fields for 2d threshold
+ :param ground_truths: List of NumPy Arrays containing ground truth data
+ :param threshold_output_feature_names: List of output_feature_names for 2d threshold
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: Name of the model to use as label.
@@ -1952,7 +1947,7 @@ def confidence_thresholding_2thresholds_2d(
try:
validate_conf_treshholds_and_probabilities_2d_3d(
probabilities_per_model,
- threshold_fields
+ threshold_output_feature_names
)
except RuntimeError:
return
@@ -1973,8 +1968,8 @@ def confidence_thresholding_2thresholds_2d(
thresholds = [t / 100 for t in range(0, 101, 5)]
fixed_step_coverage = thresholds
- name_t1 = '{} threshold'.format(threshold_fields[0])
- name_t2 = '{} threshold'.format(threshold_fields[1])
+ name_t1 = '{} threshold'.format(threshold_output_feature_names[0])
+ name_t2 = '{} threshold'.format(threshold_output_feature_names[1])
accuracies = []
dataset_kept = []
@@ -2112,23 +2107,21 @@ def confidence_thresholding_2thresholds_2d(
def confidence_thresholding_2thresholds_3d(
probabilities_per_model,
ground_truths,
- threshold_fields,
+ threshold_output_feature_names,
labels_limit,
output_directory=None,
file_format='pdf',
**kwargs
):
- """Show 3d confidence trethreshold data vs accuracy for two field thresholds
+ """Show 3d confidence trethreshold data vs accuracy for two output_feature_name thresholds
The plot shows the 3d surfaces displayed by
confidence_thresholding_2thresholds_3d that have thresholds on the
- confidence of the predictions of the two threshold_fields as x and y axes
+ confidence of the predictions of the two threshold_output_feature_names as x and y axes
and either the data coverage percentage or the accuracy as z axis.
:param probabilities_per_model: List of model probabilities
- :param ground_truths: List of NumPy Arrays containing computed model ground
- truth data for target prediction fields based on the model
- metadata
- :param threshold_fields: List of fields for 2d threshold
+ :param ground_truths: List of NumPy Arrays containing ground truth data
+ :param threshold_output_feature_names: List of output_feature_names for 2d threshold
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param output_directory: Directory where to save plots.
@@ -2139,7 +2132,7 @@ def confidence_thresholding_2thresholds_3d(
try:
validate_conf_treshholds_and_probabilities_2d_3d(
probabilities_per_model,
- threshold_fields
+ threshold_output_feature_names
)
except RuntimeError:
return
@@ -2216,7 +2209,7 @@ def confidence_thresholding_2thresholds_3d(
np.array(thresholds),
np.array(accuracies),
np.array(dataset_kept),
- threshold_fields,
+ threshold_output_feature_names,
title='Confidence_Thresholding, two thresholds',
filename=filename
)
@@ -2232,20 +2225,18 @@ def binary_threshold_vs_metric(
file_format='pdf',
**kwargs
):
- """Show confidence of the model against metric for the specified field.
+ """Show confidence of the model against metric for the specified output_feature_name.
For each metric specified in metrics (options are f1, precision, recall,
accuracy), this visualization produces a line chart plotting a threshold
on the confidence of the model against the metric for the specified
- field. If field is a category feature, positive_label indicates which is
+ output_feature_name. If output_feature_name is a category feature, positive_label indicates which is
the class to be considered positive class and all the others will be
considered negative. It needs to be an integer, to figure out the
association between classes and integers check the ground_truth_metadata
JSON file.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: List of NumPy Arrays containing computed model
- ground truth data for target prediction fields based on the model
- metadata
+ :param ground_truth: List of NumPy Arrays containing ground truth data
:param metrics: metrics to dispay (f1, precision, recall,
accuracy)
:param positive_label: Label of the positive class
@@ -2346,18 +2337,16 @@ def roc_curves(
file_format='pdf',
**kwargs
):
- """Show the roc curves for the specified models output field.
+ """Show the roc curves for the specified models output output_feature_name.
This visualization produces a line chart plotting the roc curves for the
- specified field. If field is a category feature, positive_label indicates
+ specified output_feature_name. If output_feature_name is a category feature, positive_label indicates
which is the class to be considered positive class and all the others will
be considered negative. It needs to be an integer, to figure out the
association between classes and integers check the ground_truth_metadata
JSON file.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: List of NumPy Arrays containing computed model
- ground truth data for target prediction fields based on the model
- metadata
+ :param ground_truth: List of NumPy Arrays containing ground truth data
:param positive_label: Label of the positive class
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
@@ -2396,19 +2385,19 @@ def roc_curves(
def roc_curves_from_test_statistics(
test_stats_per_model,
- field,
+ output_feature_name,
model_names=None,
output_directory=None,
file_format='pdf',
**kwargs
):
- """Show the roc curves for the specified models output binary field.
+ """Show the roc curves for the specified models output binary output_feature_name.
- This visualization uses the field, test_statistics and model_names
- parameters. field needs to be binary feature. This visualization produces a
- line chart plotting the roc curves for the specified field.
+ This visualization uses the output_feature_name, test_statistics and model_names
+ parameters. output_feature_name needs to be a binary feature. This visualization produces a
+ line chart plotting the roc curves for the specified output_feature_name.
:param test_stats_per_model: List containing train statistics per model
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted and for which ground truth is provided
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
If not specified, plots will be displayed in a window
@@ -2423,9 +2412,9 @@ def roc_curves_from_test_statistics(
)
fpr_tprs = []
for curr_test_statistics in test_stats_per_model:
- fpr = curr_test_statistics[field]['roc_curve'][
+ fpr = curr_test_statistics[output_feature_name]['roc_curve'][
'false_positive_rate']
- tpr = curr_test_statistics[field]['roc_curve'][
+ tpr = curr_test_statistics[output_feature_name]['roc_curve'][
'true_positive_rate']
fpr_tprs.append((fpr, tpr))
@@ -2447,11 +2436,11 @@ def calibration_1_vs_all(
file_format='pdf',
**kwargs
):
- """Show models probability of predictions for the specified field.
+ """Show models probability of predictions for the specified output_feature_name.
For each class or each of the k most frequent classes if top_k is
specified, it produces two plots computed on the fly from the
- probabilities of predictions for the specified field.
+ probabilities of predictions for the specified output_feature_name.
The first plot is a calibration curve that shows the calibration of the
predictions considering the current class to be the true one and all
@@ -2463,8 +2452,7 @@ def calibration_1_vs_all(
drawing the distribution for each model (in the aligned lists of
probabilities and model_names).
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param top_n_classes: List containing the number of classes to plot
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
@@ -2579,11 +2567,10 @@ def calibration_multiclass(
**kwargs
):
"""Show models probability of predictions for each class of the the
- specified field.
+ specified output_feature_name.
:param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing computed model ground truth
- data for target prediction field based on the model metadata
+ :param ground_truth: NumPy Array containing ground truth data
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
:param model_names: List of the names of the models to use as labels.
@@ -2674,7 +2661,7 @@ def calibration_multiclass(
def confusion_matrix(
test_stats_per_model,
metadata,
- field,
+ output_feature_name,
top_n_classes,
normalize,
model_names=None,
@@ -2682,15 +2669,15 @@ def confusion_matrix(
file_format='pdf',
**kwargs
):
- """Show confision matrix in the models predictions for each field.
+ """Show confision matrix in the models predictions for each output_feature_name.
For each model (in the aligned lists of test_statistics and model_names)
it produces a heatmap of the confusion matrix in the predictions for
- each field that has a confusion matrix in test_statistics. The value of
+ each output_feature_name that has a confusion matrix in test_statistics. The value of
top_n_classes limits the heatmap to the n most frequent classes.
:param test_stats_per_model: List containing train statistics per model
:param metadata: Model's input metadata
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted and for which ground truth is provided
:param top_n_classes: List containing the number of classes to plot
:param normalize: Flag to normalize rows in confusion matrix
:param model_names: List of the names of the models to use as labels.
@@ -2707,26 +2694,26 @@ def confusion_matrix(
output_directory,
filename_template
)
- fields = validate_visualization_prediction_field_from_test_stats(
- field,
+ output_feature_names = _validate_output_feature_name_from_test_stats(
+ output_feature_name,
test_stats_per_model_list
)
for i, test_statistics in enumerate(
test_stats_per_model_list):
- for field in fields:
- if 'confusion_matrix' in test_statistics[field]:
+ for output_feature_name in output_feature_names:
+ if 'confusion_matrix' in test_statistics[output_feature_name]:
confusion_matrix = np.array(
- test_statistics[field]['confusion_matrix']
+ test_statistics[output_feature_name]['confusion_matrix']
)
model_name_name = model_names_list[i] if (
model_names_list is not None and i < len(
model_names_list)
) else ''
- if metadata is not None and field in metadata and 'idx2str' in \
- metadata[field]:
- labels = metadata[field]['idx2str']
+ if metadata is not None and output_feature_name in metadata and 'idx2str' in \
+ metadata[output_feature_name]:
+ labels = metadata[output_feature_name]['idx2str']
else:
labels = list(range(len(confusion_matrix)))
@@ -2747,14 +2734,14 @@ def confusion_matrix(
os.makedirs(output_directory, exist_ok=True)
filename = filename_template_path.format(
model_name_name,
- field,
+ output_feature_name,
'top' + str(k)
)
visualization_utils.confusion_matrix_plot(
cm,
labels[:k],
- field=field,
+ output_feature_name=output_feature_name,
filename=filename
)
@@ -2772,7 +2759,7 @@ def confusion_matrix(
if output_directory:
filename = filename_template_path.format(
'entropy_' + model_name_name,
- field,
+ output_feature_name,
'top' + str(k)
)
@@ -2789,17 +2776,17 @@ def confusion_matrix(
def frequency_vs_f1(
test_stats_per_model,
metadata,
- field,
+ output_feature_name,
top_n_classes,
model_names=None,
output_directory=None,
file_format='pdf',
**kwargs
):
- """Show prediction statistics for the specified field for each model.
+ """Show prediction statistics for the specified output_feature_name for each model.
For each model (in the aligned lists of test_statistics and model_names),
- produces two plots statistics of predictions for the specified field.
+ produces two plots of prediction statistics for the specified output_feature_name.
The first plot is a line plot with one x axis representing the different
classes and two vertical axes colored in orange and blue respectively.
@@ -2812,7 +2799,7 @@ def frequency_vs_f1(
frequency.
:param test_stats_per_model: List containing train statistics per model
:param metadata: Model's input metadata
- :param field: Prediction field containing ground truth.
+ :param output_feature_name: Name of the output feature that is predicted and for which ground truth is provided
:param top_n_classes: List containing the number of classes to plot
:param model_names: List of the names of the models to use as labels.
:param output_directory: Directory where to save plots.
@@ -2828,23 +2815,23 @@ def frequency_vs_f1(
output_directory,
filename_template
)
- fields = validate_visualization_prediction_field_from_test_stats(
- field,
+ output_feature_names = _validate_output_feature_name_from_test_stats(
+ output_feature_name,
test_stats_per_model_list
)
k = top_n_classes[0]
for i, test_statistics in enumerate(
test_stats_per_model_list):
- for field in fields:
+ for output_feature_name in output_feature_names:
model_name_name = (model_names_list[i]
if model_names_list is not None and i < len(
model_names_list)
else '')
- per_class_stats = test_statistics[field]['per_class_stats']
+ per_class_stats = test_statistics[output_feature_name]['per_class_stats']
f1_scores = []
labels = []
- class_names = metadata[field]['idx2str']
+ class_names = metadata[output_feature_name]['idx2str']
if k > 0:
class_names = class_names[:k]
for class_name in class_names:
@@ -2855,17 +2842,17 @@ def frequency_vs_f1(
f1_np = np.nan_to_num(np.array(f1_scores, dtype=np.float32))
f1_sorted_indices = f1_np.argsort()
- field_frequency_dict = {
- metadata[field]['str2idx'][key]: val
- for key, val in metadata[field]['str2freq'].items()
+ output_feature_name_frequency_dict = {
+ metadata[output_feature_name]['str2idx'][key]: val
+ for key, val in metadata[output_feature_name]['str2freq'].items()
}
- field_frequency_np = np.array(
- [field_frequency_dict[class_id]
- for class_id in sorted(field_frequency_dict)],
+ output_feature_name_frequency_np = np.array(
+ [output_feature_name_frequency_dict[class_id]
+ for class_id in sorted(output_feature_name_frequency_dict)],
dtype=np.int32
)
- field_frequency_reordered = field_frequency_np[
+ output_feature_name_frequency_reordered = output_feature_name_frequency_np[
f1_sorted_indices[::-1]
][:len(f1_sorted_indices)]
f1_reordered = f1_np[f1_sorted_indices[::-1]][
@@ -2874,41 +2861,41 @@ def frequency_vs_f1(
filename = None
if output_directory:
os.makedirs(output_directory, exist_ok=True)
- filename = filename_template_path.format(model_name_name, field)
+ filename = filename_template_path.format(model_name_name, output_feature_name)
visualization_utils.double_axis_line_plot(
f1_reordered,
- field_frequency_reordered,
+ output_feature_name_frequency_reordered,
'F1 score',
'frequency',
labels=labels,
title='{} F1 Score vs Frequency {}'.format(
model_name_name,
- field
+ output_feature_name
),
filename=filename
)
- frequency_sorted_indices = field_frequency_np.argsort()
- field_frequency_reordered = field_frequency_np[
+ frequency_sorted_indices = output_feature_name_frequency_np.argsort()
+ output_feature_name_frequency_reordered = output_feature_name_frequency_np[
frequency_sorted_indices[::-1]
][:len(f1_sorted_indices)]
- f1_reordered = np.zeros(len(field_frequency_reordered))
+ f1_reordered = np.zeros(len(output_feature_name_frequency_reordered))
for idx in frequency_sorted_indices[::-1]:
if idx < len(f1_np):
f1_reordered[idx] = f1_np[idx]
visualization_utils.double_axis_line_plot(
- field_frequency_reordered,
+ output_feature_name_frequency_reordered,
f1_reordered,
'frequency',
'F1 score',
labels=labels,
title='{} F1 Score vs Frequency {}'.format(
model_name_name,
- field
+ output_feature_name
),
filename=filename
)
@@ -2999,9 +2986,9 @@ def cli(sys_argv):
parser.add_argument(
'-f',
- '--field',
+ '--output_feature_name',
default=[],
- help='field containing ground truth'
+ help='name of the output feature to visualize'
)
parser.add_argument(
'-gts',
@@ -3011,10 +2998,10 @@ def cli(sys_argv):
)
parser.add_argument(
'-tf',
- '--threshold_fields',
+ '--threshold_output_feature_names',
default=[],
nargs='+',
- help='fields for 2d threshold'
+ help='names of output features for 2d threshold'
)
parser.add_argument(
'-pred',
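
A minimal usage sketch of the renamed Python API (not part of the patch): the wrapper name, the 'results'/'Model1' values, and the top-5 setting are hypothetical, while the positional order follows the signature above, where the third argument is now output_feature_name rather than field.

from ludwig import visualize


def plot_multiclass_metrics(test_stats, ground_truth_metadata,
                            output_feature_name):
    # Hypothetical wrapper; test_stats and ground_truth_metadata are assumed
    # to come from a previously run experiment (per-model test statistics and
    # the training-set metadata with the str2idx/idx2str mappings).
    visualize.compare_classifiers_multiclass_multimetric(
        [test_stats],              # test statistics per model
        ground_truth_metadata,     # training-set metadata
        output_feature_name,       # was: field
        top_n_classes=[5],
        model_names=['Model1'],
        output_directory='results',
        file_format='png',
    )
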
diff --git a/tests/integration_tests/test_visualization.py b/tests/integration_tests/test_visualization.py
--- a/tests/integration_tests/test_visualization.py
+++ b/tests/integration_tests/test_visualization.py
@@ -74,20 +74,20 @@ def run_experiment(input_features, output_features, **kwargs):
return exp_dir_name
-def get_output_field_name(experiment_dir, output_feature=0):
+def get_output_feature_name(experiment_dir, output_feature=0):
"""Helper function to extract specified output feature name.
:param experiment_dir: Path to the experiment directory
:param output_feature: position of the output feature the description.json
- :return field_name: name of the first output feature name
+ :return output_feature_name: name of the selected output feature
from the experiment
"""
description_file = experiment_dir + '/description.json'
with open(description_file, 'rb') as f:
content = json.load(f)
- field_name = \
+ output_feature_name = \
content['model_definition']['output_features'][output_feature]['name']
- return field_name
+ return output_feature_name
def test_visualisation_learning_curves_output_saved(csv_filename):
@@ -277,8 +277,8 @@ def test_visualisation_compare_classifiers_from_prob_csv_output_saved(
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.csv'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.csv'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -288,8 +288,8 @@ def test_visualisation_compare_classifiers_from_prob_csv_output_saved(
'compare_classifiers_performance_from_prob',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -344,8 +344,8 @@ def test_visualisation_compare_classifiers_from_prob_npy_output_saved(
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -355,8 +355,8 @@ def test_visualisation_compare_classifiers_from_prob_npy_output_saved(
'compare_classifiers_performance_from_prob',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -409,8 +409,8 @@ def test_visualisation_compare_classifiers_from_pred_npy_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- prediction = exp_dir_name + '/{}_predictions.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ prediction = exp_dir_name + '/{}_predictions.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -423,8 +423,8 @@ def test_visualisation_compare_classifiers_from_pred_npy_output_saved(
ground_truth_metadata,
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--predictions',
prediction,
prediction,
@@ -477,8 +477,8 @@ def test_visualisation_compare_classifiers_from_pred_csv_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- prediction = exp_dir_name + '/{}_predictions.csv'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ prediction = exp_dir_name + '/{}_predictions.csv'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -491,8 +491,8 @@ def test_visualisation_compare_classifiers_from_pred_csv_output_saved(
ground_truth_metadata,
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--predictions',
prediction,
prediction,
@@ -543,8 +543,8 @@ def test_visualisation_compare_classifiers_subset_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -552,8 +552,8 @@ def test_visualisation_compare_classifiers_subset_output_saved(csv_filename):
'ludwig.visualize',
'--visualization',
'compare_classifiers_performance_subset',
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -606,8 +606,8 @@ def test_visualisation_compare_classifiers_changing_k_output_pdf(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -615,8 +615,8 @@ def test_visualisation_compare_classifiers_changing_k_output_pdf(csv_filename):
'ludwig.visualize',
'--visualization',
'compare_classifiers_performance_changing_k',
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -672,7 +672,7 @@ def test_visualisation_compare_classifiers_multiclass_multimetric_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
test_stats = exp_dir_name + '/test_statistics.json'
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -681,8 +681,8 @@ def test_visualisation_compare_classifiers_multiclass_multimetric_output_saved(
'ludwig.visualize',
'--visualization',
'compare_classifiers_multiclass_multimetric',
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--test_statistics',
test_stats,
test_stats,
@@ -735,8 +735,8 @@ def test_visualisation_compare_classifiers_predictions_npy_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- prediction = exp_dir_name + '/{}_predictions.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ prediction = exp_dir_name + '/{}_predictions.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -746,8 +746,8 @@ def test_visualisation_compare_classifiers_predictions_npy_output_saved(
'compare_classifiers_predictions',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--predictions',
prediction,
prediction,
@@ -801,8 +801,8 @@ def test_visualisation_compare_classifiers_predictions_csv_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- prediction = exp_dir_name + '/{}_predictions.csv'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ prediction = exp_dir_name + '/{}_predictions.csv'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -812,8 +812,8 @@ def test_visualisation_compare_classifiers_predictions_csv_output_saved(
'compare_classifiers_predictions',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--predictions',
prediction,
prediction,
@@ -865,8 +865,8 @@ def test_visualisation_cmp_classifiers_predictions_distribution_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- prediction = exp_dir_name + '/{}_predictions.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ prediction = exp_dir_name + '/{}_predictions.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -876,8 +876,8 @@ def test_visualisation_cmp_classifiers_predictions_distribution_output_saved(
'compare_classifiers_predictions_distribution',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--predictions',
prediction,
prediction,
@@ -928,8 +928,8 @@ def test_visualisation_cconfidence_thresholding_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -939,8 +939,8 @@ def test_visualisation_cconfidence_thresholding_output_saved(csv_filename):
'confidence_thresholding',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -993,8 +993,8 @@ def test_visualisation_confidence_thresholding_data_vs_acc_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -1004,8 +1004,8 @@ def test_visualisation_confidence_thresholding_data_vs_acc_output_saved(
'confidence_thresholding_data_vs_acc',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1058,8 +1058,8 @@ def test_visualisation_confidence_thresholding_data_vs_acc_subset_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -1069,8 +1069,8 @@ def test_visualisation_confidence_thresholding_data_vs_acc_subset_output_saved(
'confidence_thresholding_data_vs_acc_subset',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1125,8 +1125,8 @@ def test_vis_confidence_thresholding_data_vs_acc_subset_per_class_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -1139,8 +1139,8 @@ def test_vis_confidence_thresholding_data_vs_acc_subset_per_class_output_saved(
ground_truth,
'--ground_truth_metadata',
ground_truth_metadata,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1202,13 +1202,13 @@ def test_vis_confidence_thresholding_2thresholds_2d_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- treshhold_field1 = get_output_field_name(exp_dir_name)
- treshhold_field2 = get_output_field_name(exp_dir_name, output_feature=1)
+ treshhold_output_feature_name1 = get_output_feature_name(exp_dir_name)
+ treshhold_output_feature_name2 = get_output_feature_name(exp_dir_name, output_feature=1)
probability1 = exp_dir_name + '/{}_probabilities.npy'.format(
- treshhold_field1
+ treshhold_output_feature_name1
)
probability2 = exp_dir_name + '/{}_probabilities.npy'.format(
- treshhold_field2
+ treshhold_output_feature_name2
)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
@@ -1222,9 +1222,9 @@ def test_vis_confidence_thresholding_2thresholds_2d_output_saved(
'--probabilities',
probability1,
probability2,
- '--threshold_fields',
- treshhold_field1,
- treshhold_field2,
+ '--threshold_output_feature_names',
+ treshhold_output_feature_name1,
+ treshhold_output_feature_name2,
'--model_names',
'Model1',
'-od', exp_dir_name]
@@ -1278,13 +1278,13 @@ def test_vis_confidence_thresholding_2thresholds_3d_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- treshhold_field1 = get_output_field_name(exp_dir_name)
- treshhold_field2 = get_output_field_name(exp_dir_name, output_feature=1)
+ treshhold_output_feature_name1 = get_output_feature_name(exp_dir_name)
+ treshhold_output_feature_name2 = get_output_feature_name(exp_dir_name, output_feature=1)
probability1 = exp_dir_name + '/{}_probabilities.npy'.format(
- treshhold_field1
+ treshhold_output_feature_name1
)
probability2 = exp_dir_name + '/{}_probabilities.npy'.format(
- treshhold_field2
+ treshhold_output_feature_name2
)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
@@ -1298,9 +1298,9 @@ def test_vis_confidence_thresholding_2thresholds_3d_output_saved(csv_filename):
'--probabilities',
probability1,
probability2,
- '--threshold_fields',
- treshhold_field1,
- treshhold_field2,
+ '--threshold_output_feature_names',
+ treshhold_output_feature_name1,
+ treshhold_output_feature_name2,
'-od', exp_dir_name]
test_cmd_png = test_cmd_pdf.copy() + ['-ff', 'png']
@@ -1352,8 +1352,8 @@ def test_visualisation_binary_threshold_vs_metric_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -1367,8 +1367,8 @@ def test_visualisation_binary_threshold_vs_metric_output_saved(csv_filename):
'accuracy',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1419,8 +1419,8 @@ def test_visualisation_roc_curves_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -1437,8 +1437,8 @@ def test_visualisation_roc_curves_output_saved(csv_filename):
ground_truth,
'--ground_truth_metadata',
ground_truth_metadata,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1487,7 +1487,7 @@ def test_visualisation_roc_curves_from_test_statistics_output_saved(
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
test_stats = exp_dir_name + '/test_statistics.json'
experiment_source_data_name = csv_filename.split('.')[0]
test_cmd_pdf = ['python',
@@ -1495,8 +1495,8 @@ def test_visualisation_roc_curves_from_test_statistics_output_saved(
'ludwig.visualize',
'--visualization',
'roc_curves_from_test_statistics',
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--test_statistics',
test_stats,
'--model_names',
@@ -1545,8 +1545,8 @@ def test_visualisation_calibration_1_vs_all_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -1558,8 +1558,8 @@ def test_visualisation_calibration_1_vs_all_output_saved(csv_filename):
'accuracy',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1612,8 +1612,8 @@ def test_visualisation_calibration_multiclass_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
- probability = exp_dir_name + '/{}_probabilities.npy'.format(field_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
+ probability = exp_dir_name + '/{}_probabilities.npy'.format(output_feature_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
test_cmd_pdf = ['python',
@@ -1623,8 +1623,8 @@ def test_visualisation_calibration_multiclass_output_saved(csv_filename):
'calibration_multiclass',
'--ground_truth',
ground_truth,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--probabilities',
probability,
probability,
@@ -1675,7 +1675,7 @@ def test_visualisation_frequency_vs_f1_output_saved(csv_filename):
)
vis_output_pattern_pdf = exp_dir_name + '/*.pdf'
vis_output_pattern_png = exp_dir_name + '/*.png'
- field_name = get_output_field_name(exp_dir_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
test_stats = exp_dir_name + '/test_statistics.json'
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth_metadata = experiment_source_data_name + '.json'
@@ -1686,8 +1686,8 @@ def test_visualisation_frequency_vs_f1_output_saved(csv_filename):
'frequency_vs_f1',
'--ground_truth_metadata',
ground_truth_metadata,
- '--field',
- field_name,
+ '--output_feature_name',
+ output_feature_name,
'--test_statistics',
test_stats,
test_stats,
@@ -1735,32 +1735,32 @@ def test_load_ground_truth_split_from_file(csv_filename):
output_features,
data_csv=rel_path
)
- field_name = get_output_field_name(exp_dir_name)
+ output_feature_name = get_output_feature_name(exp_dir_name)
experiment_source_data_name = csv_filename.split('.')[0]
ground_truth = experiment_source_data_name + '.hdf5'
- ground_truth_train_split = load_from_file(ground_truth, field_name,
+ ground_truth_train_split = load_from_file(ground_truth, output_feature_name,
ground_truth_split=0)
- ground_truth_val_split = load_from_file(ground_truth, field_name,
+ ground_truth_val_split = load_from_file(ground_truth, output_feature_name,
ground_truth_split=1)
- ground_truth_test_split = load_from_file(ground_truth, field_name)
+ ground_truth_test_split = load_from_file(ground_truth, output_feature_name)
test_df, train_df, val_df = obtain_df_splits(csv_filename)
- target_predictions_from_train = train_df[field_name]
- target_predictions_from_val = val_df[field_name]
- target_predictions_from_test = test_df[field_name]
+ target_predictions_from_train = train_df[output_feature_name]
+ target_predictions_from_val = val_df[output_feature_name]
+ target_predictions_from_test = test_df[output_feature_name]
gtm_name = experiment_source_data_name + '.json'
ground_truth_metadata = load_json(gtm_name)
ground_truth_loaded_train_split = np.asarray([
- ground_truth_metadata[field_name]['str2idx'][train_row]
+ ground_truth_metadata[output_feature_name]['str2idx'][train_row]
for train_row in target_predictions_from_train
])
ground_truth_loaded_val_split = np.asarray([
- ground_truth_metadata[field_name]['str2idx'][val_row]
+ ground_truth_metadata[output_feature_name]['str2idx'][val_row]
for val_row in target_predictions_from_val
])
ground_truth_loaded_test_split = np.asarray([
- ground_truth_metadata[field_name]['str2idx'][test_row]
+ ground_truth_metadata[output_feature_name]['str2idx'][test_row]
for test_row in target_predictions_from_test
])
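
A minimal sketch of the matching command-line invocation (not part of the patch), mirroring the command lists in the tests above; 'train.hdf5', 'Survived', and 'results' are hypothetical placeholders, and the only flag that changes relative to the old interface is --output_feature_name, which replaces --field.

import subprocess

cmd = [
    'python', '-m', 'ludwig.visualize',
    '--visualization', 'compare_classifiers_performance_from_prob',
    '--ground_truth', 'train.hdf5',
    '--output_feature_name', 'Survived',  # was: --field
    '--probabilities', 'results/Survived_probabilities.npy',
    '--model_names', 'Model1',
    '-od', 'results',
]
subprocess.run(cmd, check=True)
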
diff --git a/tests/integration_tests/test_visualization_api.py b/tests/integration_tests/test_visualization_api.py
--- a/tests/integration_tests/test_visualization_api.py
+++ b/tests/integration_tests/test_visualization_api.py
@@ -78,20 +78,20 @@ def __init__(self, csv_filename):
self.test_stats_full = self.model.test(
data_df=test_df
)
- self.field = self.output_features[0]['name']
+ self.output_feature_name = self.output_features[0]['name']
# probabilities need to be list of lists containing each row data
# from the probability columns
# ref: https://uber.github.io/ludwig/api/#test - Return
self.probability = self.test_stats_full[0].iloc[:, 2:].values
self.ground_truth_metadata = self.model.train_set_metadata
- target_predictions = test_df[self.field]
+ target_predictions = test_df[self.output_feature_name]
self.ground_truth = np.asarray([
- self.ground_truth_metadata[self.field]['str2idx'][test_row]
+ self.ground_truth_metadata[self.output_feature_name]['str2idx'][test_row]
for test_row in target_predictions
])
self.prediction_raw = self.test_stats_full[0].iloc[:, 0].tolist()
self.prediction = np.asarray([
- self.ground_truth_metadata[self.field]['str2idx'][pred_row]
+ self.ground_truth_metadata[self.output_feature_name]['str2idx'][pred_row]
for pred_row in self.prediction_raw])
def setup_model(self):
@@ -137,7 +137,7 @@ def test_learning_curves_vis_api(csv_filename):
viz_output)
visualize.learning_curves(
experiment.train_stats,
- field=None,
+ output_feature_name=None,
output_directory=experiment.model.exp_dir_name,
file_format=viz_output
)
@@ -161,7 +161,7 @@ def test_compare_performance_vis_api(csv_filename):
)
visualize.compare_performance(
[test_stats, test_stats],
- field=None,
+ output_feature_name=None,
model_namess=['Model1', 'Model2'],
output_directory=experiment.model.exp_dir_name,
file_format=viz_output
@@ -213,7 +213,7 @@ def test_compare_classifier_performance_from_pred_vis_api(csv_filename):
[prediction, prediction],
experiment.ground_truth,
experiment.ground_truth_metadata,
- experiment.field,
+ experiment.output_feature_name,
labels_limit=0,
model_namess=['Model1', 'Model2'],
output_directory=experiment.model.exp_dir_name,
@@ -294,7 +294,7 @@ def test_compare_classifiers_multiclass_multimetric_vis_api(csv_filename):
visualize.compare_classifiers_multiclass_multimetric(
[test_stats, test_stats],
experiment.ground_truth_metadata,
- experiment.field,
+ experiment.output_feature_name,
top_n_classes=[6],
model_namess=['Model1', 'Model2'],
output_directory=experiment.model.exp_dir_name,
@@ -456,7 +456,7 @@ def test_confidence_thresholding_data_vs_acc_subset_per_class_vis_api(
[probability, probability],
experiment.ground_truth,
experiment.ground_truth_metadata,
- experiment.field,
+ experiment.output_feature_name,
top_n_classes=[3],
labels_limit=0,
subset='ground_truth',
@@ -502,22 +502,22 @@ def test_confidence_thresholding_2thresholds_2d_vis_api(csv_filename):
data_df=test_df
)
- field1 = output_features[0]['name']
- field2 = output_features[1]['name']
+ output_feature_name1 = output_features[0]['name']
+ output_feature_name2 = output_features[1]['name']
# probabilities need to be list of lists containing each row data from the
# probability columns ref: https://uber.github.io/ludwig/api/#test - Return
probability1 = test_stats[0].iloc[:, [2, 3, 4]].values
probability2 = test_stats[0].iloc[:, [7, 8, 9]].values
ground_truth_metadata = model.train_set_metadata
- target_predictions1 = test_df[field1]
- target_predictions2 = test_df[field2]
+ target_predictions1 = test_df[output_feature_name1]
+ target_predictions2 = test_df[output_feature_name2]
ground_truth1 = np.asarray([
- ground_truth_metadata[field1]['str2idx'][prediction]
+ ground_truth_metadata[output_feature_name1]['str2idx'][prediction]
for prediction in target_predictions1
])
ground_truth2 = np.asarray([
- ground_truth_metadata[field2]['str2idx'][prediction]
+ ground_truth_metadata[output_feature_name2]['str2idx'][prediction]
for prediction in target_predictions2
])
viz_outputs = ('pdf', 'png')
@@ -526,7 +526,7 @@ def test_confidence_thresholding_2thresholds_2d_vis_api(csv_filename):
visualize.confidence_thresholding_2thresholds_2d(
[probability1, probability2],
[ground_truth1, ground_truth2],
- [field1, field2],
+ [output_feature_name1, output_feature_name2],
labels_limit=0,
model_names=['Model1'],
output_directory=model.exp_dir_name,
@@ -568,22 +568,22 @@ def test_confidence_thresholding_2thresholds_3d_vis_api(csv_filename):
data_df=test_df
)
- field1 = output_features[0]['name']
- field2 = output_features[1]['name']
+ output_feature_name1 = output_features[0]['name']
+ output_feature_name2 = output_features[1]['name']
# probabilities need to be list of lists containing each row data from the
# probability columns ref: https://uber.github.io/ludwig/api/#test - Return
probability1 = test_stats[0].iloc[:, [2, 3, 4]].values
probability2 = test_stats[0].iloc[:, [7, 8, 9]].values
ground_truth_metadata = model.train_set_metadata
- target_predictions1 = test_df[field1]
- target_predictions2 = test_df[field2]
+ target_predictions1 = test_df[output_feature_name1]
+ target_predictions2 = test_df[output_feature_name2]
ground_truth1 = np.asarray([
- ground_truth_metadata[field1]['str2idx'][prediction]
+ ground_truth_metadata[output_feature_name1]['str2idx'][prediction]
for prediction in target_predictions1
])
ground_truth2 = np.asarray([
- ground_truth_metadata[field2]['str2idx'][prediction]
+ ground_truth_metadata[output_feature_name2]['str2idx'][prediction]
for prediction in target_predictions2
])
viz_outputs = ('pdf', 'png')
@@ -592,7 +592,7 @@ def test_confidence_thresholding_2thresholds_3d_vis_api(csv_filename):
visualize.confidence_thresholding_2thresholds_3d(
[probability1, probability2],
[ground_truth1, ground_truth2],
- [field1, field2],
+ [output_feature_name1, output_feature_name2],
labels_limit=0,
output_directory=model.exp_dir_name,
file_format=viz_output
@@ -670,7 +670,7 @@ def test_roc_curves_from_test_statistics_vis_api(csv_filename):
# Generate test data
data_csv = generate_data(input_features, output_features, csv_filename)
- field = output_features[0]['name']
+ output_feature_name = output_features[0]['name']
input_features[0]['encoder'] = encoder
model = run_api_experiment(input_features, output_features)
data_df = read_csv(data_csv)
@@ -681,7 +681,7 @@ def test_roc_curves_from_test_statistics_vis_api(csv_filename):
vis_output_pattern_pdf = model.exp_dir_name + '/*.{}'.format(viz_output)
visualize.roc_curves_from_test_statistics(
[test_stats, test_stats],
- field,
+ output_feature_name,
model_namess=['Model1', 'Model2'],
output_directory=model.exp_dir_name,
file_format=viz_output
@@ -760,7 +760,7 @@ def test_confusion_matrix_vis_api(csv_filename):
visualize.confusion_matrix(
[test_stats, test_stats],
experiment.ground_truth_metadata,
- experiment.field,
+ experiment.output_feature_name,
top_n_classes=[0],
normalize=False,
model_names=['Model1', 'Model2'],
@@ -788,7 +788,7 @@ def test_frequency_vs_f1_vis_api(csv_filename):
visualize.frequency_vs_f1(
[test_stats, test_stats],
experiment.ground_truth_metadata,
- experiment.field,
+ experiment.output_feature_name,
top_n_classes=[0],
model_names=['Model1', 'Model2'],
output_directory=experiment.model.exp_dir_name,
| Troubles with visualisation
Hello, I am having trouble visualising my results using the few commands of code here; I am hoping you could advise me.
**This is the training and testing code:**

**This is the one for learning curves, which I have no problems generating**

**For Learning curves**
I cannot seem to figure out what files I could use for the probability and ground truth, and I am not sure if they are the correct ones.

**For Confusion matrix**
Same issue: I am not sure what my fields are, hence I am getting many errors.

For further information, the only data that appear in my results are as follows:


It would be great if you could advise me. Thank you very much.
| @SSicaaJL we're going to work on improving the visualization documentation. In the meantime, follow this for confidence thresholding:
```python
def confidence_thresholding_cli(
probabilities,
ground_truth,
ground_truth_split,
field,
**kwargs
):
"""Load model data from files to be shown by confidence_thresholding.
:param probabilities: Path to experiment probabilities file
:param ground_truth: Path to ground truth file
:param ground_truth_split: Type of ground truth split - train, val, test
:param field: Target prediction field
:param kwargs: model configuration arguments
:return None:
"""
```
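
For concreteness, a hedged sketch of the equivalent call through the Python API, mirroring the integration test included in the patch above. The paths, the `weatherAUS.csv` name and the `RainTomorrow` output feature are assumptions about this particular setup (and the feature is assumed to be a category output), not part of Ludwig's API:

```python
# Hedged sketch: file paths and the feature name are placeholders for your own run.
import numpy as np
import pandas as pd
from ludwig import visualize
from ludwig.api import LudwigModel

model = LudwigModel.load('results/experiment_run/model')   # placeholder path
test_df = pd.read_csv('weatherAUS.csv')                    # placeholder test data

test_stats = model.test(data_df=test_df)
# probability columns of the predictions DataFrame, as in the integration test above
probabilities = test_stats[0].iloc[:, 2:].values
field = 'RainTomorrow'
metadata = model.train_set_metadata
# encode the string labels the same way Ludwig did during preprocessing
ground_truth = np.asarray(
    [metadata[field]['str2idx'][row] for row in test_df[field]]
)

visualize.confidence_thresholding(
    [probabilities],              # one entry per model
    ground_truth,
    labels_limit=0,
    model_names=['my_model'],
    output_directory='./viz',     # omit to display the plot in a window
    file_format='png',
)
```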
for confusion matrix:
```python
def confusion_matrix_cli(test_statistics, ground_truth_metadata, **kwargs):
"""Load model data from files to be shown by confusion_matrix.
:param test_statistics: Path to experiment test statistics file
:param ground_truth_metadata: Path to ground truth metadata file
:param kwargs: model configuration arguments
:return None:
"""
```
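
And a hedged sketch of the corresponding programmatic confusion-matrix call, loading the two files that `ludwig experiment` and preprocessing leave on disk. The file names below are assumptions; substitute whatever appears in your own results directory and next to your CSV:

```python
# Hedged sketch: file names are placeholders for the artifacts of your own run.
import json
from ludwig import visualize

with open('results/experiment_run/test_statistics.json') as f:  # experiment test statistics
    test_stats = json.load(f)
with open('weatherAUS.json') as f:                               # ground truth metadata JSON
    metadata = json.load(f)

visualize.confusion_matrix(
    [test_stats],          # list with one entry per model
    metadata,
    'RainTomorrow',        # the `field` to plot
    top_n_classes=[0],     # 0 keeps all classes, as in the integration tests above
    normalize=False,
    model_names=['my_model'],
    output_directory='./viz',
    file_format='png',
)
```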
Sorry for the late response.
@SSicaaJL
The `field` parameter is the name of the column / output feature you want to visualize. In your case it would be `field = "RainTomorrow"`.
For the learning curves:
I don't know how you obtained train_stats. If you got it from the API `model.train()` you can use it as it is, otherwise you just have to load the `training_statistics.json` file that was saved at the end of training. After you do that `learning_curves(train_stats, field, output_directory=output_dir, file_format='png')` should work.
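
A hedged sketch of both routes (the results path, CSV name and `RainTomorrow` feature name are placeholders for your own run):

```python
# Hedged sketch: either reuse the dict returned by model.train() or reload the
# training_statistics.json written at the end of training.
import json
from ludwig.visualize import learning_curves

# a) directly from the API
# train_stats = model.train(data_csv='weatherAUS.csv')

# b) from the file saved by training (path is a placeholder)
with open('results/experiment_run/training_statistics.json') as f:
    train_stats = json.load(f)

learning_curves(
    [train_stats],           # list with one entry per model
    'RainTomorrow',          # the `field` argument
    output_directory='./viz',
    file_format='png',
)
```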
For the confusion matrix:
just use the `field` parameter as I specified and your command should work.
Will update the docs to try to explain better what `field` is. Please confirm this solves your issues. | 2019-10-08T01:00:14 |
ludwig-ai/ludwig | 551 | ludwig-ai__ludwig-551 | [
"547"
] | 5c1ff277d7c394d0084e7b6b25b7b3e1b65436c6 | diff --git a/ludwig/visualize.py b/ludwig/visualize.py
--- a/ludwig/visualize.py
+++ b/ludwig/visualize.py
@@ -682,17 +682,22 @@ def learning_curves(
):
"""Show how model measures change over training and validation data epochs.
- For each model and for each output feature and measure of the model,
- it produces a line plot showing how that measure changed over the course
- of the epochs of training on the training and validation sets.
- :param train_stats_per_model: List containing train statistics per model
- :param output_feature_name: Name of the output feature that is predicted
+ For each model and for each output feature and measure of the model,
+ it produces a line plot showing how that measure changed over the course
+ of the epochs of training on the training and validation sets.
+
+ # Inputs
+
+ :param train_stats_per_model: (list) List containing train statistics per model
+ :param output_feature_name: (string) Name of the output feature that is predicted
and for which is provided ground truth
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+ :return: (None)
"""
filename_template = 'learning_curves_{}_{}.' + file_format
filename_template_path = generate_filename_template_path(
@@ -754,13 +759,19 @@ def compare_performance(
For each model (in the aligned lists of test_statistics and model_names)
it produces bars in a bar plot, one for each overall metric available
in the test_statistics file for the specified output_feature_name.
- :param test_stats_per_model: List containing train statistics per model
- :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param test_stats_per_model: (list) List containing train statistics per model
+ :param output_feature_name: (string) Name of the output feature that is predicted and for which is provided ground truth
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
filename_template = 'compare_performance_{}.' + file_format
filename_template_path = generate_filename_template_path(
@@ -833,16 +844,22 @@ def compare_classifiers_performance_from_prob(
For each model it produces bars in a bar plot, one for each overall metric
computed on the fly from the probabilities of predictions for the specified
output_feature_name.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param top_n_classes: List containing the number of classes to plot
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
top_n_classes_list = convert_to_list(top_n_classes)
k = top_n_classes_list[0]
@@ -912,10 +929,13 @@ def compare_classifiers_performance_from_pred(
For each model it produces bars in a bar plot, one for each overall metric
computed on the fly from the predictions for the specified output_feature_name.
- :param predictions_per_model: List containing the model predictions
+
+ # Inputs
+
+ :param predictions_per_model: (list) List containing the model predictions
for the specified output_feature_name
- :param ground_truth: NumPy Array containing ground truth data
- :param metadata: Model's input metadata
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param metadata: (dict) Model's input metadata
:param output_feature_name: output_feature_name containing ground truth
:param labels_limit: Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
@@ -923,7 +943,10 @@ def compare_classifiers_performance_from_pred(
:param output_directory: Directory where to save plots.
If not specified, plots will be displayed in a window
:param file_format: File format of output plots - pdf or png
- :return None:
+
+ # Return
+
+ :return: (None)
"""
if labels_limit > 0:
ground_truth[ground_truth > labels_limit] = labels_limit
@@ -996,16 +1019,22 @@ def compare_classifiers_performance_subset(
specified output_feature_name, considering only a subset of the full training set.
The way the subset is obtained is using the top_n_classes and
subset parameters.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param top_n_classes: List containing the number of classes to plot
- :param labels_limit: Maximum numbers of labels.
- :param subset: Type of the subset filtering
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param labels_limit: (int) Maximum numbers of labels.
+ :param subset: () Type of the subset filtering
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
top_n_classes_list = convert_to_list(top_n_classes)
k = top_n_classes_list[0]
@@ -1105,16 +1134,22 @@ def compare_classifiers_performance_changing_k(
For each model it produces a line plot that shows the Hits@K measure
(that counts a prediction as correct if the model produces it among the
first k) while changing k from 1 to top_k for the specified output_feature_name.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param top_k: Number of elements in the ranklist to consider
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param top_k: (int) Number of elements in the ranklist to consider
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
k = top_k
if labels_limit > 0:
@@ -1169,16 +1204,20 @@ def compare_classifiers_multiclass_multimetric(
For each model it produces four plots that show the precision,
recall and F1 of the model on several classes for the specified output_feature_name.
- :param test_stats_per_model: List containing train statistics per model
- :param metadata: Model's input metadata
- :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
- :param top_n_classes: List containing the number of classes to plot
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param test_stats_per_model: (list) List containing train statistics per model
+ :param metadata: (dict) Model's input metadata
+ :param output_feature_name: (string) Name of the output feature that is predicted and for which is provided ground truth
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
- :return:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+ :return: (None)
"""
filename_template = 'compare_classifiers_multiclass_multimetric_{}_{}_{}.' \
+ file_format
@@ -1338,15 +1377,20 @@ def compare_classifiers_predictions(
):
"""Show two models comparision of their output_feature_name predictions.
- :param predictions_per_model: List containing the model predictions
- :param ground_truth: NumPy Array containing ground truth data
- :param labels_limit: Maximum numbers of labels.
+ # Inputs
+
+ :param predictions_per_model: (list) List containing the model predictions
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
model_names_list = convert_to_list(model_names)
name_c1 = (
@@ -1469,15 +1513,21 @@ def compare_classifiers_predictions_distribution(
This visualization produces a radar plot comparing the distributions of
predictions of the models for the first 10 classes of the specified output_feature_name.
- :param predictions_per_model: List containing the model predictions
- :param ground_truth: NumPy Array containing ground truth data
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param predictions_per_model: (list) List containing the model predictions
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
model_names_list = convert_to_list(model_names)
if labels_limit > 0:
@@ -1530,15 +1580,21 @@ def confidence_thresholding(
For each model it produces a pair of lines indicating the accuracy of
the model and the data coverage while increasing a threshold (x axis) on
the probabilities of predictions for the specified output_feature_name.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (sting) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
if labels_limit > 0:
ground_truth[ground_truth > labels_limit] = labels_limit
@@ -1613,15 +1669,20 @@ def confidence_thresholding_data_vs_acc(
confidence_thresholding is that it uses two axes instead of three,
not visualizing the threshold and having coverage as x axis instead of
the threshold.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param labels_limit:(int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+ :return: (None)
"""
if labels_limit > 0:
ground_truth[ground_truth > labels_limit] = labels_limit
@@ -1707,16 +1768,22 @@ def confidence_thresholding_data_vs_acc_subset(
that is within the top n most frequent ones will be considered as test set,
and the percentage of datapoints that have been kept from the original set
will be displayed for each model.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param top_n_classes: List containing the number of classes to plot
- :param labels_limit: Maximum numbers of labels.
- :param subset: Type of the subset filtering
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param labels_limit: (int) Maximum numbers of labels.
+ :param subset: (string) Type of the subset filtering
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
top_n_classes_list = convert_to_list(top_n_classes)
k = top_n_classes_list[0]
@@ -1832,17 +1899,22 @@ def confidence_thresholding_data_vs_acc_subset_per_class(
The difference with confidence_thresholding_data_vs_acc_subset is that it
produces one plot per class within the top_n_classes.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param metadata: Model's input metadata
- :param top_n_classes: List containing the number of classes to plot
- :param labels_limit: Maximum numbers of labels.
- :param subset: Type of the subset filtering
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param metadata: (dict) Model's input metadata
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param labels_limit: (int) Maximum numbers of labels.
+ :param subset: (string) Type of the subset filtering
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+ :return: (None)
"""
filename_template = \
'confidence_thresholding_data_vs_acc_subset_per_class_{}.' + file_format
@@ -1947,16 +2019,22 @@ def confidence_thresholding_2thresholds_2d(
threshold_output_feature_names as x and y axes and either the data coverage percentage or
the accuracy as z axis. Each line represents a slice of the data
coverage surface projected onto the accuracy surface.
- :param probabilities_per_model: List of model probabilities
- :param ground_truths: List of NumPy Arrays containing ground truth data
- :param threshold_output_feature_names: List of output_feature_names for 2d threshold
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truths: (list) List of NumPy Arrays containing ground truth data
+ :param threshold_output_feature_names: (list) List of output_feature_names for 2d threshold
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: Name of the model to use as label.
- :param output_directory: Directory where to save plots.
+ :param model_names: (string) Name of the model to use as label.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
try:
validate_conf_treshholds_and_probabilities_2d_3d(
@@ -2133,15 +2211,21 @@ def confidence_thresholding_2thresholds_3d(
confidence_thresholding_2thresholds_3d that have thresholds on the
confidence of the predictions of the two threshold_output_feature_names as x and y axes
and either the data coverage percentage or the accuracy as z axis.
- :param probabilities_per_model: List of model probabilities
- :param ground_truths: List of NumPy Arrays containing ground truth data
- :param threshold_output_feature_names: List of output_feature_names for 2d threshold
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truths: (list) List of NumPy Arrays containing ground truth data
+ :param threshold_output_feature_names: (list) List of output_feature_names for 2d threshold
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param output_directory: Directory where to save plots.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
try:
validate_conf_treshholds_and_probabilities_2d_3d(
@@ -2249,16 +2333,22 @@ def binary_threshold_vs_metric(
considered negative. It needs to be an integer, to figure out the
association between classes and integers check the ground_truth_metadata
JSON file.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: List of NumPy Arrays containing ground truth data
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (list) List of NumPy Arrays containing ground truth data
:param metrics: metrics to dispay (f1, precision, recall,
accuracy)
- :param positive_label: Label of the positive class
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param positive_label: (string) Label of the positive class
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
probs = probabilities_per_model
model_names_list = convert_to_list(model_names)
@@ -2359,14 +2449,20 @@ def roc_curves(
be considered negative. It needs to be an integer, to figure out the
association between classes and integers check the ground_truth_metadata
JSON file.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: List of NumPy Arrays containing ground truth data
- :param positive_label: Label of the positive class
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (list) List of NumPy Arrays containing ground truth data
+ :param positive_label: (string) Label of the positive class
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
probs = probabilities_per_model
model_names_list = convert_to_list(model_names)
@@ -2410,13 +2506,19 @@ def roc_curves_from_test_statistics(
This visualization uses the output_feature_name, test_statistics and model_names
parameters. output_feature_name needs to be binary feature. This visualization produces a
line chart plotting the roc curves for the specified output_feature_name.
- :param test_stats_per_model: List containing train statistics per model
- :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param test_stats_per_model: (list) List containing train statistics per model
+ :param output_feature_name: (string) Name of the output feature that is predicted and for which is provided ground truth
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
model_names_list = convert_to_list(model_names)
filename_template = 'roc_curves_from_prediction_statistics.' + file_format
@@ -2465,16 +2567,22 @@ def calibration_1_vs_all(
the current class to be the true one and all others to be a false one,
drawing the distribution for each model (in the aligned lists of
probabilities and model_names).
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param top_n_classes: List containing the number of classes to plot
- :param labels_limit: Maximum numbers of labels.
+
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # String
+
+ :return: (None)
"""
probs = probabilities_per_model
model_names_list = convert_to_list(model_names)
@@ -2583,15 +2691,20 @@ def calibration_multiclass(
"""Show models probability of predictions for each class of the the
specified output_feature_name.
- :param probabilities_per_model: List of model probabilities
- :param ground_truth: NumPy Array containing ground truth data
- :param labels_limit: Maximum numbers of labels.
+ # Inputs
+
+ :param probabilities_per_model: (list) List of model probabilities
+ :param ground_truth: (ndarray) NumPy Array containing ground truth data
+ :param labels_limit: (int) Maximum numbers of labels.
If labels in dataset are higher than this number, "rare" label
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
probs = probabilities_per_model
model_names_list = convert_to_list(model_names)
@@ -2689,17 +2802,22 @@ def confusion_matrix(
it produces a heatmap of the confusion matrix in the predictions for
each output_feature_name that has a confusion matrix in test_statistics. The value of
top_n_classes limits the heatmap to the n most frequent classes.
- :param test_stats_per_model: List containing train statistics per model
- :param metadata: Model's input metadata
- :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
- :param top_n_classes: List containing the number of classes to plot
- :param normalize: Flag to normalize rows in confusion matrix
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param test_stats_per_model: (string) List containing train statistics per model
+ :param metadata: (dict) Model's input metadata
+ :param output_feature_name: (string) Name of the output feature that is predicted and for which is provided ground truth
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param normalize: (bool) Flag to normalize rows in confusion matrix
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
- :return:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
test_stats_per_model_list = test_stats_per_model
model_names_list = convert_to_list(model_names)
@@ -2717,7 +2835,7 @@ def confusion_matrix(
test_stats_per_model_list):
for output_feature_name in output_feature_names:
if 'confusion_matrix' in test_statistics[output_feature_name]:
- confusion_matrix = np.array(
+ _confusion_matrix = np.array(
test_statistics[output_feature_name]['confusion_matrix']
)
model_name_name = model_names_list[i] if (
@@ -2729,12 +2847,12 @@ def confusion_matrix(
metadata[output_feature_name]:
labels = metadata[output_feature_name]['idx2str']
else:
- labels = list(range(len(confusion_matrix)))
+ labels = list(range(len(_confusion_matrix)))
for k in top_n_classes:
- k = (min(k, confusion_matrix.shape[0])
- if k > 0 else confusion_matrix.shape[0])
- cm = confusion_matrix[:k, :k]
+ k = (min(k, _confusion_matrix.shape[0])
+ if k > 0 else _confusion_matrix.shape[0])
+ cm = _confusion_matrix[:k, :k]
if normalize:
with np.errstate(divide='ignore', invalid='ignore'):
cm_norm = np.true_divide(cm,
@@ -2811,16 +2929,21 @@ def frequency_vs_f1(
The second plot has the same structure of the first one,
but the axes are flipped and the classes on the x axis are sorted by
frequency.
- :param test_stats_per_model: List containing train statistics per model
- :param metadata: Model's input metadata
- :param output_feature_name: Name of the output feature that is predicted and for which is provided ground truth
- :param top_n_classes: List containing the number of classes to plot
- :param model_names: List of the names of the models to use as labels.
- :param output_directory: Directory where to save plots.
+
+ # Inputs
+
+ :param test_stats_per_model: (list) List containing train statistics per model
+ :param metadata: (dict) Model's input metadata
+ :param output_feature_name: (string) Name of the output feature that is predicted and for which is provided ground truth
+ :param top_n_classes: (list) List containing the number of classes to plot
+ :param model_names: (list, default: None) List of the names of the models to use as labels.
+ :param output_directory: (string, default: None) Directory where to save plots.
If not specified, plots will be displayed in a window
- :param file_format: File format of output plots - pdf or png
- :return None:
- :return:
+ :param file_format: (string, default: 'pdf') File format of output plots - pdf or png
+
+ # Return
+
+ :return: (None)
"""
test_stats_per_model_list = test_stats_per_model
model_names_list = convert_to_list(model_names)
diff --git a/mkdocs/code_doc_autogen.py b/mkdocs/code_doc_autogen.py
--- a/mkdocs/code_doc_autogen.py
+++ b/mkdocs/code_doc_autogen.py
@@ -132,24 +132,24 @@
OUTPUT_DIR = 'docs'
-def get_function_signature(function, method=True):
- wrapped = getattr(function, '_original_function', None)
+def get_function_signature(_function, _method=True):
+ wrapped = getattr(_function, '_original_function', None)
if wrapped is None:
- signature = inspect.getargspec(function)
+ _signature = inspect.getfullargspec(_function)
else:
- signature = inspect.getargspec(wrapped)
- defaults = signature.defaults
- if method and len(signature.args) > 0 and signature.args[0] == 'self':
- args = signature.args[1:]
+ _signature = inspect.getfullargspec(wrapped)
+ defaults = _signature.defaults
+ if _method and _signature.args and _signature.args[0] == 'self':
+ args = _signature.args[1:]
else:
- args = signature.args
+ args = _signature.args
if defaults:
kwargs = zip(args[-len(defaults):], defaults)
args = args[:-len(defaults)]
else:
kwargs = []
st = '%s.%s(\n' % (
- clean_module_name(function.__module__), function.__name__)
+ clean_module_name(_function.__module__), _function.__name__)
for a in args:
st += ' {},\n'.format(str(a))
@@ -158,32 +158,32 @@ def get_function_signature(function, method=True):
v = '\'' + v + '\''
st += ' {}={},\n'.format(str(a), str(v))
if kwargs or args:
- signature = st[:-2] + '\n)'
+ _signature = st[:-2] + '\n)'
else:
- signature = st + ')'
- return post_process_signature(signature)
+ _signature = st + ')'
+ return post_process_signature(_signature)
-def get_class_signature(cls):
+def get_class_signature(_cls):
try:
- class_signature = get_function_signature(cls.__init__)
- class_signature = class_signature.replace('__init__', cls.__name__)
+ class_signature = get_function_signature(_cls.__init__)
+ class_signature = class_signature.replace('__init__', _cls.__name__)
except (TypeError, AttributeError):
# in case the class inherits from object and does not
# define __init__
class_signature = "{clean_module_name}.{cls_name}()".format(
- clean_module_name=clean_module_name(cls.__module__),
- cls_name=cls.__name__
+ clean_module_name=clean_module_name(_cls.__module__),
+ cls_name=_cls.__name__
)
return post_process_signature(class_signature)
-def post_process_signature(signature):
- parts = re.split(r'\.(?!\d)', signature)
+def post_process_signature(_signature):
+ parts = re.split(r'\.(?!\d)', _signature)
if len(parts) >= 4:
if parts[1] == 'api':
- signature = 'ludwig.' + '.'.join(parts[2:])
- return signature
+ _signature = 'ludwig.' + '.'.join(parts[2:])
+ return _signature
def clean_module_name(name):
@@ -192,20 +192,20 @@ def clean_module_name(name):
return name
-def class_to_docs_link(cls):
- module_name = clean_module_name(cls.__module__)
+def class_to_docs_link(_cls):
+ module_name = clean_module_name(_cls.__module__)
module_name = module_name[6:]
- link = ROOT + module_name.replace('.', '/') + '#' + cls.__name__.lower()
+ link = ROOT + module_name.replace('.', '/') + '#' + _cls.__name__.lower()
return link
-def class_to_source_link(cls):
- module_name = clean_module_name(cls.__module__)
- path = module_name.replace('.', '/')
- path += '.py'
- line = inspect.getsourcelines(cls)[-1]
+def class_to_source_link(_cls):
+ module_name = clean_module_name(_cls.__module__)
+ _path = module_name.replace('.', '/')
+ _path += '.py'
+ line = inspect.getsourcelines(_cls)[-1]
link = ('https://github.com/uber/'
- 'ludwig/blob/master/' + path + '#L' + str(line))
+ 'ludwig/blob/master/' + _path + '#L' + str(line))
return '[[source]](' + link + ')'
@@ -224,18 +224,18 @@ def count_leading_spaces(s):
return 0
-def process_list_block(docstring, starting_point, section_end,
+def process_list_block(_docstring, starting_point, section_end,
leading_spaces, marker):
- ending_point = docstring.find('\n\n', starting_point)
- block = docstring[starting_point:(None if ending_point == -1 else
+ ending_point = _docstring.find('\n\n', starting_point)
+ block = _docstring[starting_point:(None if ending_point == -1 else
ending_point - 1)]
# Place marker for later reinjection.
- docstring_slice = docstring[
+ docstring_slice = _docstring[
starting_point:None if section_end == -1 else section_end].replace(
block, marker)
- docstring = (docstring[:starting_point]
+ _docstring = (_docstring[:starting_point]
+ docstring_slice
- + docstring[section_end:])
+ + _docstring[section_end:])
lines = block.split('\n')
# Remove the computed number of leading white spaces from each line.
lines = [re.sub('^' + ' ' * leading_spaces, '', line) for line in lines]
@@ -294,20 +294,20 @@ def process_list_block(docstring, starting_point, section_end,
lines[i] = '- __return__{}:{}'.format(inside_brackets, line)
block = '\n'.join(lines)
- return docstring, block
+ return _docstring, block
-def process_docstring(docstring):
+def process_docstring(_docstring):
# First, extract code blocks and process them.
code_blocks = []
- if '```' in docstring:
- tmp = docstring[:]
+ if '```' in _docstring:
+ tmp = _docstring[:]
while '```' in tmp:
tmp = tmp[tmp.find('```'):]
index = tmp[3:].find('```') + 6
snippet = tmp[:index]
# Place marker in docstring for later reinjection.
- docstring = docstring.replace(
+ _docstring = _docstring.replace(
snippet, '$CODE_BLOCK_%d' % len(code_blocks))
snippet_lines = snippet.split('\n')
# Remove leading spaces.
@@ -338,83 +338,88 @@ def process_docstring(docstring):
# Format docstring lists.
section_regex = r'\n( +)# (.*)\n'
- section_idx = re.search(section_regex, docstring)
+ section_idx = re.search(section_regex, _docstring)
shift = 0
sections = {}
while section_idx and section_idx.group(2):
anchor = section_idx.group(2)
leading_spaces = len(section_idx.group(1))
shift += section_idx.end()
- next_section_idx = re.search(section_regex, docstring[shift:])
+ next_section_idx = re.search(section_regex, _docstring[shift:])
if next_section_idx is None:
section_end = -1
else:
section_end = shift + next_section_idx.start()
marker = '$' + anchor.replace(' ', '_') + '$'
- docstring, content = process_list_block(docstring,
- shift,
- section_end,
- leading_spaces,
- marker)
+ _docstring, content = process_list_block(_docstring,
+ shift,
+ section_end,
+ leading_spaces,
+ marker)
sections[marker] = content
# `docstring` has changed, so we can't use `next_section_idx` anymore
# we have to recompute it
- section_idx = re.search(section_regex, docstring[shift:])
+ section_idx = re.search(section_regex, _docstring[shift:])
# Format docstring section titles.
- docstring = re.sub(r'\n(\s+)# (.*)\n',
+ _docstring = re.sub(r'\n(\s+)# (.*)\n',
r'\n\1__\2__\n\n',
- docstring)
+ _docstring)
# Strip all remaining leading spaces.
- lines = docstring.split('\n')
- docstring = '\n'.join([line.lstrip(' ') for line in lines])
+ # generator function to resolve deepsource.io major issue
+ def strip_leading_spaces(these_lines):
+ for l in these_lines:
+ yield l.lstrip(' ')
+
+ lines = _docstring.split('\n')
+ _docstring = '\n'.join(strip_leading_spaces(lines))
# Reinject list blocks.
for marker, content in sections.items():
- docstring = docstring.replace(marker, content)
+ _docstring = _docstring.replace(marker, content)
# Reinject code blocks.
for i, code_block in enumerate(code_blocks):
- docstring = docstring.replace(
+ _docstring = _docstring.replace(
'$CODE_BLOCK_%d' % i, code_block)
- return docstring
+ return _docstring
-def read_file(path):
- with open(path) as f:
- return f.read()
+def read_file(_path):
+ with open(_path) as _f:
+ return _f.read()
-def collect_class_methods(cls, methods):
- if isinstance(methods, (list, tuple)):
- return [getattr(cls, m) if isinstance(m, str) else m for m in methods]
- methods = []
- for _, method in inspect.getmembers(cls, predicate=inspect.isroutine):
- if method.__name__[0] == '_' or method.__name__ in EXCLUDE:
+def collect_class_methods(_cls, _methods):
+ if isinstance(_methods, (list, tuple)):
+ return [getattr(_cls, m) if isinstance(m, str) else m for m in _methods]
+ _methods = []
+ for _, _method in inspect.getmembers(_cls, predicate=inspect.isroutine):
+ if _method.__name__[0] == '_' or _method.__name__ in EXCLUDE:
continue
- methods.append(method)
- return methods
+ _methods.append(_method)
+ return _methods
-def render_function(function, method=True):
- subblocks = []
- signature = get_function_signature(function, method=method)
- if method:
- signature = signature.replace(
- clean_module_name(function.__module__) + '.', '')
- subblocks.append('## ' + function.__name__ + '\n')
- subblocks.append(code_snippet(signature))
- docstring = function.__doc__
- if docstring:
- subblocks.append(process_docstring(docstring))
- return '\n\n'.join(subblocks)
+def render_function(_function, _method=True):
+ _subblocks = []
+ _signature = get_function_signature(_function, _method=_method)
+ if _method:
+ _signature = _signature.replace(
+ clean_module_name(_function.__module__) + '.', '')
+ _subblocks.append('## ' + _function.__name__ + '\n')
+ _subblocks.append(code_snippet(_signature))
+ _docstring = _function.__doc__
+ if _docstring:
+ _subblocks.append(process_docstring(_docstring))
+ return '\n\n'.join(_subblocks)
-def read_page_data(page_data, type):
+def read_page_data(_page_data, type):
assert type in ['classes', 'functions', 'methods']
- data = page_data.get(type, [])
- for module in page_data.get('all_module_{}'.format(type), []):
+ data = _page_data.get(type, [])
+ for module in _page_data.get('all_module_{}'.format(type), []):
module_data = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
@@ -459,7 +464,15 @@ def read_page_data(page_data, type):
print('Generating docs for Ludwig %s.' % ludwig.__version__)
for page_data in PAGES:
- classes = read_page_data(page_data, 'classes')
+ classes = []
+ functions = []
+ methods = []
+ if 'classes' in page_data.keys():
+ classes = read_page_data(page_data, 'classes')
+ elif 'functions' in page_data.keys():
+ functions = read_page_data(page_data, 'functions')
+ else:
+ raise TypeError('Invalid type specified in page_data')
blocks = []
for element in classes:
@@ -482,20 +495,24 @@ def read_page_data(page_data, type):
if methods:
subblocks.append('\n---')
subblocks.append('# ' + cls.__name__ + ' methods\n')
- subblocks.append('\n---\n'.join(
- [render_function(method, method=True) for method in
- methods]))
+
+ # generator function to resolve deepsource.io major issue
+ def generate_render_functions(_methods):
+ for m in _methods:
+ yield render_function(m, _method=True)
+
+ subblocks.append('\n---\n'.join(generate_render_functions(methods)))
blocks.append('\n'.join(subblocks))
methods = read_page_data(page_data, 'methods')
for method in methods:
- blocks.append(render_function(method, method=True))
+ blocks.append(render_function(method, _method=True))
functions = read_page_data(page_data, 'functions')
for function in functions:
- blocks.append(render_function(function, method=False))
+ blocks.append(render_function(function, _method=False))
if not blocks:
raise RuntimeError('Found no content for page ' +
| Incorrect formatting of visualization api calls in Ludwig documentation
**Describe the bug**
Most of the visualization API parameter documentation is not correctly formatted.
**To Reproduce**
Example of incorrect formatting of documentation: https://uber.github.io/ludwig/api/visualization/#learning_curves
<img width="962" alt="api_doc_incorrect_format" src="https://user-images.githubusercontent.com/1425269/67630148-3c5cc000-f859-11e9-9d2e-8d4e2893dd0b.png">
**Expected behavior**
Correct formatting of the parameter list
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment (please complete the following information):**
- OS: macOS 10.15
- Version 10.15
- Python version N/A
- Ludwig version: 0.2.1
**Additional context**
I've researched and modified the module `code_doc_autogen.py` to correctly format the visualization api parameters. In addition, minor updates were made to the docstring for functions in `visualize.py`. Here is an example generated by the revised `code_doc_autogen.py`:
<img width="856" alt="api_doc_correct_format" src="https://user-images.githubusercontent.com/1425269/67630218-e426bd80-f85a-11e9-836f-acb3440c0e6d.png">
If this looks reasonable, I'm willing to finish up the work to correctly format the visualization api calls and submit a PR. Any guidance will be appreciated.
| 2019-10-27T21:08:08 |
||
ludwig-ai/ludwig | 586 | ludwig-ai__ludwig-586 | [
"585"
] | 05d6c6917e10541f08e448d1a756d153ab2dc41e | diff --git a/ludwig/utils/visualization_utils.py b/ludwig/utils/visualization_utils.py
--- a/ludwig/utils/visualization_utils.py
+++ b/ludwig/utils/visualization_utils.py
@@ -90,11 +90,11 @@ def learning_curves_plot(
name_prefix = algorithm_names[
i] + ' ' if algorithm_names is not None and i < len(
algorithm_names) else ''
- ax.plot(xs, train_values[i], label=name_prefix + 'training',
+ ax.plot(xs[:len(train_values[i])], train_values[i], label=name_prefix + 'training',
color=colors[i * 2], linewidth=3)
if i < len(vali_values) and vali_values[i] is not None and len(
vali_values[i]) > 0:
- ax.plot(xs, vali_values[i], label=name_prefix + 'validation',
+ ax.plot(xs[:len(vali_values[i])], vali_values[i], label=name_prefix + 'validation',
color=colors[i * 2 + 1], linewidth=3)
ax.legend()
| learning_curves() function fails with ValueError: x and y must have same first dimension
**Describe the bug**
learning_curves() function fails when plotting two or more models at once. Error message:
```
Traceback (most recent call last):
File "/opt/project/sandbox/mwe.py", line 76, in <module>
learning_curves(train_stats_list,'y', model_names=model_names, output_directory='./viz', file_format='png')
File "/usr/local/lib/python3.6/dist-packages/ludwig-0.2.1-py3.6.egg/ludwig/visualize.py", line 740, in learning_curves
File "/usr/local/lib/python3.6/dist-packages/ludwig-0.2.1-py3.6.egg/ludwig/utils/visualization_utils.py", line 94, in learning_curves_plot
File "/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_axes.py", line 1666, in plot
lines = [*self._get_lines(*args, data=data, **kwargs)]
File "/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_base.py", line 225, in __call__
yield from self._plot_args(this, kwargs)
File "/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_base.py", line 391, in _plot_args
x, y = self._xy_from_xy(x, y)
File "/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_base.py", line 270, in _xy_from_xy
"have shapes {} and {}".format(x.shape, y.shape))
ValueError: x and y must have same first dimension, but have shapes (100,) and (24,)
```
Ludwig version: 0.2.1
**To Reproduce**
This program will reproduce the error:
```
#
# Minimum Working Example for learning curves Plot error with early stopping
#
#%%
import numpy as np
import pandas as pd
import warnings
warnings.simplefilter('ignore')
import ludwig
from ludwig.api import LudwigModel
from ludwig.visualize import learning_curves
print(ludwig.__version__)
#%%
NUM_ROWS = 5000
NUM_COLS = 20
#%% create synthetic training data explanatory variables
np.random.seed(42)
# create relevant explanatory variables
X = np.random.rand(NUM_ROWS, NUM_COLS)
X_df = pd.DataFrame(X)
X_df.columns = ['X_' + '{:0>3d}'.format(i+1) for i in range(NUM_COLS)]
#%% create response variable
np.random.seed(21)
beta = -2 + 4 * np.random.rand(NUM_COLS)
beta = np.insert(beta, 0, 1)
print(beta)
# closure to create response variable
def create_logistic(beta):
return lambda x: 1.0 / (np.exp(-np.sum(np.insert(x, 0, 1) * beta)) + 1)
calc_logistic = create_logistic(beta)
# Create response binary response variable
logit_y = pd.Series(np.apply_along_axis(calc_logistic, 1, X))
print(logit_y.describe())
# create binary classification response variable
y = (logit_y > logit_y.describe()['50%']).astype('int')
y_df = pd.DataFrame(y)
y_df.columns = ['y']
df_train = pd.concat([X_df, y_df], axis=1)
#%% setup two models to train
model_definitions = [
{
'input_features': [{'name': 'X_'+'{:0>3d}'.format(i+1), 'type': 'numerical'} for i in range(NUM_COLS)],
'output_features': [{'name':'y', 'type':'binary', 'fc_layers':[{'fc_size':128}]}]
},
{
'input_features': [{'name': 'X_'+'{:0>3d}'.format(i+1), 'type': 'numerical'} for i in range(NUM_COLS)],
'output_features': [{'name':'y', 'type':'binary', 'fc_layers':[{'fc_size':128}, {'fc_size': 64}, {'fc_size': 32}]}],
}
]
model_names = ['model1', 'model2']
train_stats_list = []
# train the models
for i in range(len(model_definitions)):
print('training model:', model_names[i])
model = LudwigModel(model_definition=model_definitions[i])
train_stats = model.train(data_df=df_train)
train_stats_list.append(train_stats)
# create learning_curve plot for the training
learning_curves(train_stats_list,'y', model_names=model_names, output_directory='./viz', file_format='png')
```
**Expected behavior**
Learning curves plot for the multiple model training runs.
**Screenshots**
See error message above.
**Environment (please complete the following information):**
- OS: MacOS 10.15.2
- Docker Container base image: Ubuntu 18.04
- TensorFlow: 1.14.0
- Python version: 3.6.8
- Ludwig version: 0.2.1
**Additional context**
From what I can tell, the error occurs because early stopping caused a different number of epochs to be recorded for the two model training runs.
If I can determine root cause, I'll submit a PR to fix.
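To illustrate the mismatch (a minimal sketch of the idea behind the fix in the diff at the top of this report, not the actual Ludwig code): when one run stops early its curve is shorter than the shared x axis, and slicing the x values to the curve length avoids the error.
```
import matplotlib.pyplot as plt
import numpy as np

# x axis sized for the longest training run (e.g. 100 epochs)
xs = np.arange(100)
# the second run stopped early, so its curve has only 24 points
train_values = [np.random.rand(100), np.random.rand(24)]

fig, ax = plt.subplots()
for i, values in enumerate(train_values):
    # slicing xs to the curve length keeps x and y the same size
    ax.plot(xs[:len(values)], values, label='model{} training'.format(i + 1))
ax.legend()
fig.savefig('learning_curves.png')
```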
| 2019-12-02T01:46:40 |
||
ludwig-ai/ludwig | 608 | ludwig-ai__ludwig-608 | [
"462"
] | e8af86cddff04f523d990203db2321b446debcb3 | diff --git a/ludwig/experiment.py b/ludwig/experiment.py
--- a/ludwig/experiment.py
+++ b/ludwig/experiment.py
@@ -22,6 +22,10 @@
import logging
import os
import sys
+import tempfile
+
+import numpy as np
+import pandas as pd
import yaml
from ludwig.contrib import contrib_command
@@ -31,12 +35,12 @@
from ludwig.predict import print_test_results
from ludwig.predict import save_prediction_outputs
from ludwig.predict import save_test_statistics
-from ludwig.train import full_train
-from ludwig.utils.defaults import default_random_seed
+from ludwig.train import full_train, logger
+from ludwig.utils.data_utils import generate_kfold_splits, save_json
+from ludwig.utils.defaults import default_random_seed, merge_with_defaults
from ludwig.utils.print_utils import logging_level_registry
from ludwig.utils.print_utils import print_ludwig
-
logger = logging.getLogger(__name__)
@@ -282,6 +286,156 @@ def experiment(
return experiment_dir_name
+def kfold_cross_validate(
+ k_fold,
+ model_definition=None,
+ model_definition_file=None,
+ data_csv=None,
+ output_directory='results',
+ random_seed=default_random_seed,
+ skip_save_k_fold_split_indices=False,
+ **kwargs
+):
+ """Performs k-fold cross validation.
+
+ # Inputs
+ :param k_fold: (int) number of folds to create for the cross-validation
+ :param model_definition: (dict, default: None) a dictionary containing
+ information needed to build a model. Refer to the [User Guide]
+ (http://ludwig.ai/user_guide/#model-definition) for details.
+ :param model_definition_file: (string, optional, default: `None`) path to
+ a YAML file containing the model definition. If available it will be
+ used instead of the model_definition dict.
+ :param data_csv: (string, default: None)
+ :param output_directory: (string, default: 'results')
+ :param random_seed: (int) Random seed used k-fold splits.
+ :param skip_save_k_fold_split_indices: (boolean, default: False) Disables
+ saving k-fold split indices
+
+ :return: None
+ """
+
+ # check for model_definition and model_definition_file
+ if model_definition is None and model_definition_file is None:
+ raise ValueError(
+ 'Either model_definition of model_definition_file have to be'
+ 'not None to initialize a LudwigModel'
+ )
+ if model_definition is not None and model_definition_file is not None:
+ raise ValueError(
+ 'Only one between model_definition and '
+ 'model_definition_file can be provided'
+ )
+
+ # check for k_fold
+ if k_fold is None:
+ raise ValueError(
+ 'k_fold parameter must be specified'
+ )
+
+ logger.info('starting {:d}-fold cross validation'.format(k_fold))
+
+ # create output_directory if not available
+ if not os.path.isdir(output_directory):
+ os.mkdir(output_directory)
+
+ # read in data to split for the folds
+ data_df = pd.read_csv(data_csv)
+
+ # place each fold in a separate directory
+ data_dir = os.path.dirname(data_csv)
+ kfold_training_stats = {}
+ kfold_split_indices = {}
+ for train_indices, test_indices, fold_num in \
+ generate_kfold_splits(data_df, k_fold, random_seed):
+ with tempfile.TemporaryDirectory(dir=data_dir) as temp_dir_name:
+ curr_train_df = data_df.iloc[train_indices]
+ curr_test_df = data_df.iloc[test_indices]
+
+ if not skip_save_k_fold_split_indices:
+ kfold_split_indices['fold_' + str(fold_num)] = {
+ 'training_indices': train_indices,
+ 'test_indices': test_indices
+ }
+
+ # train and validate model on this fold
+ if model_definition_file is not None:
+ with open(model_definition_file, 'r') as def_file:
+ model_definition = \
+ merge_with_defaults(yaml.safe_load(def_file))
+ logger.info("training on fold {:d}".format(fold_num))
+ (model,
+ preprocessed_data,
+ _,
+ train_stats,
+ model_definition) = full_train(
+ model_definition,
+ data_train_df=curr_train_df,
+ data_test_df=curr_test_df,
+ experiment_name='cross_validation',
+ model_name='fold_' + str(fold_num),
+ output_directory=os.path.join(temp_dir_name, 'results')
+ )
+
+ # score on hold out fold
+ eval_batch_size = model_definition['training']['eval_batch_size']
+ batch_size = model_definition['training']['batch_size']
+ preds = model.predict(
+ preprocessed_data[2],
+ eval_batch_size if eval_batch_size != 0 else batch_size
+ )
+
+ # augment the training statistics with scoring metric fron
+ # the hold out fold
+ train_stats['fold_metric'] = {}
+ for metric_category in preds:
+ train_stats['fold_metric'][metric_category] = {}
+ for metric in preds[metric_category]:
+ train_stats['fold_metric'][metric_category][metric] = \
+ preds[metric_category][metric]
+
+ # collect training statistics for this fold
+ kfold_training_stats['fold_' + str(fold_num)] = train_stats
+
+ # consolidate raw fold metrics across all folds
+ raw_kfold_stats = {}
+ for fold_name in kfold_training_stats:
+ for category in kfold_training_stats[fold_name]['fold_metric']:
+ if category not in raw_kfold_stats:
+ raw_kfold_stats[category] = {}
+ category_stats = \
+ kfold_training_stats[fold_name]['fold_metric'][category]
+ for metric in category_stats:
+ if metric not in {'predictions', 'probabilities'}:
+ if metric not in raw_kfold_stats[category]:
+ raw_kfold_stats[category][metric] = []
+ raw_kfold_stats[category][metric] \
+ .append(category_stats[metric])
+
+ # calculate overall kfold statistics
+ overall_kfold_stats = {}
+ for category in raw_kfold_stats:
+ overall_kfold_stats[category] = {}
+ for metric in raw_kfold_stats[category]:
+ mean = np.mean(raw_kfold_stats[category][metric])
+ std = np.std(raw_kfold_stats[category][metric])
+ overall_kfold_stats[category][metric + '_mean'] = mean
+ overall_kfold_stats[category][metric + '_std'] = std
+
+ kfold_training_stats['overall'] = overall_kfold_stats
+
+ # save k-fold cv statistics
+ save_json(os.path.join(output_directory, 'kfold_training_statistics.json'),
+ kfold_training_stats)
+
+ # save k-fold split indices
+ if not skip_save_k_fold_split_indices:
+ save_json(os.path.join(output_directory, 'kfold_split_indices.json'),
+ kfold_split_indices)
+
+ logger.info('completed {:d}-fold cross validation'.format(k_fold))
+
+
def cli(sys_argv):
parser = argparse.ArgumentParser(
description='This script trains and tests a model',
@@ -378,6 +532,26 @@ def cli(sys_argv):
default=False
)
+ # -----------------
+ # K-fold parameters
+ # -----------------
+ parser.add_argument(
+ '-kf',
+ '--k_fold',
+ type=int,
+ default=None,
+ help='number of folds for a k-fold cross validation run '
+ )
+ parser.add_argument(
+ '-skfsi',
+ '--skip_save_k_fold_split_indices',
+ action='store_true',
+ default=False,
+ help='disables saving indices generated to split training data set '
+ 'for the k-fold cross validation run, but if it is not needed '
+ 'turning it off can slightly increase the overall speed'
+ )
+
# ----------------
# Model parameters
# ----------------
@@ -526,7 +700,10 @@ def cli(sys_argv):
if is_on_master():
print_ludwig('Experiment', LUDWIG_VERSION)
- experiment(**vars(args))
+ if args.k_fold is None:
+ experiment(**vars(args))
+ else:
+ kfold_cross_validate(**vars(args))
if __name__ == '__main__':
diff --git a/ludwig/train.py b/ludwig/train.py
--- a/ludwig/train.py
+++ b/ludwig/train.py
@@ -21,6 +21,7 @@
import argparse
import logging
import os
+import os.path
import sys
from pprint import pformat
@@ -96,17 +97,17 @@ def full_train(
:param model_definition_file: The file that specifies the model definition.
It is a yaml file.
:type model_definition_file: filepath (str)
- :param data_csv: A CSV file contanining the input data which is used to
+ :param data_csv: A CSV file containing the input data which is used to
train, validate and test a model. The CSV either contains a
split column or will be split.
:type data_csv: filepath (str)
- :param data_train_csv: A CSV file contanining the input data which is used
+ :param data_train_csv: A CSV file containing the input data which is used
to train a model.
:type data_train_csv: filepath (str)
- :param data_validation_csv: A CSV file contanining the input data which is used
+ :param data_validation_csv: A CSV file containing the input data which is used
to validate a model..
:type data_validation_csv: filepath (str)
- :param data_test_csv: A CSV file contanining the input data which is used
+ :param data_test_csv: A CSV file containing the input data which is used
to test a model.
:type data_test_csv: filepath (str)
:param data_hdf5: If the dataset is in the hdf5 format, this is used instead
@@ -146,7 +147,7 @@ def full_train(
:param skip_save_model: Disables saving model weights
and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
- the validation measure imrpvoes, but if the model is really big
+ the validation measure improves, but if the model is really big
that can be time consuming if you do not want to keep
the weights and just find out what performance can a model get
with a set of hyperparameters, use this parameter to skip it,
@@ -169,8 +170,8 @@ def full_train(
is not needed turning it off can slightly increase the
overall speed..
:type skip_save_progress: Boolean
- :param output_directory: The directory that will contanin the training
- statistics, the saved model and the training procgress files.
+ :param output_directory: The directory that will contain the training
+ statistics, the saved model and the training progress files.
:type output_directory: filepath (str)
:param gpus: List of GPUs that are available for training.
:type gpus: List
diff --git a/ludwig/utils/data_utils.py b/ludwig/utils/data_utils.py
--- a/ludwig/utils/data_utils.py
+++ b/ludwig/utils/data_utils.py
@@ -26,6 +26,7 @@
import numpy as np
import pandas as pd
from pandas.errors import ParserError
+from sklearn.model_selection import KFold
logger = logging.getLogger(__name__)
@@ -52,16 +53,17 @@ def read_csv(data_fp, header=0):
:param header: header argument for pandas to read the csv
:return: Pandas dataframe with the data
"""
-
- separator=','
+
+ separator = ','
with open(data_fp, 'r') as csvfile:
try:
- dialect = csv.Sniffer().sniff(csvfile.read(1024*100), delimiters=[',' , '\t', '|', ' '])
- separator=dialect.delimiter
+ dialect = csv.Sniffer().sniff(csvfile.read(1024 * 100),
+ delimiters=[',', '\t', '|', ' '])
+ separator = dialect.delimiter
except csv.Error:
# Could not conclude the delimiter, defaulting to comma
pass
-
+
try:
df = pd.read_csv(data_fp, sep=separator, header=header)
except ParserError:
@@ -395,6 +397,7 @@ def add_sequence_feature_column(df, col_name, seq_length):
df[new_col_name] = new_data
df[new_col_name] = df[new_col_name].fillna(method='backfill')
+
def override_in_memory_flag(input_features, override_value):
num_overrides = 0
for feature in input_features:
@@ -404,6 +407,7 @@ def override_in_memory_flag(input_features, override_value):
num_overrides += 1
return num_overrides
+
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
@@ -418,3 +422,11 @@ def default(self, obj):
return obj.tolist()
else:
return json.JSONEncoder.default(self, obj)
+
+
+def generate_kfold_splits(data_df, num_folds, random_state):
+ kf = KFold(n_splits=num_folds, shuffle=True, random_state=random_state)
+ fold_num = 0
+ for train_indices, test_indices in kf.split(data_df):
+ fold_num += 1
+ yield train_indices, test_indices, fold_num
| diff --git a/tests/integration_tests/test_kfold_cv.py b/tests/integration_tests/test_kfold_cv.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/test_kfold_cv.py
@@ -0,0 +1,80 @@
+import logging
+import os
+import os.path
+import tempfile
+
+import yaml
+
+from ludwig.experiment import kfold_cross_validate
+from ludwig.utils.data_utils import load_json
+from tests.integration_tests.utils import category_feature
+from tests.integration_tests.utils import generate_data
+from tests.integration_tests.utils import numerical_feature
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+logging.getLogger("ludwig").setLevel(logging.INFO)
+
+
+def test_kfold_cv():
+ num_folds = 3
+
+ # setup temporary directory to run test
+ with tempfile.TemporaryDirectory() as tmpdir:
+
+ training_data_fp = os.path.join(tmpdir, 'train.csv')
+ model_definition_fp = os.path.join(tmpdir, 'model_definition.yaml')
+ results_dir = os.path.join(tmpdir, 'results')
+ statistics_fp = os.path.join(results_dir,
+ 'kfold_training_statistics.json')
+ indices_fp = os.path.join(results_dir, 'kfold_split_indices.json')
+
+ # generate synthetic data for the test
+ input_features = [
+ numerical_feature(normalization='zscore'),
+ numerical_feature(normalization='zscore')
+ ]
+
+ output_features = [
+ category_feature(vocab_size=2, reduce_input='sum')
+ ]
+
+ generate_data(input_features, output_features, training_data_fp)
+
+ # generate model definition file
+ model_definition = {
+ 'input_features': input_features,
+ 'output_features': output_features,
+ 'combiner': {'type': 'concat', 'fc_size': 14},
+ 'training': {'epochs': 2}
+ }
+
+ with open(model_definition_fp, 'w') as f:
+ yaml.dump(model_definition, f)
+
+ # run k-fold cv
+ kfold_cross_validate(
+ k_fold=num_folds,
+ model_definition_file=model_definition_fp,
+ data_csv=training_data_fp,
+ output_directory=results_dir,
+ logging_level='warn'
+ )
+
+ # check for expected results
+ # check for existence and structure of statistics file
+ assert os.path.isfile(statistics_fp)
+
+ # check for required keys
+ cv_statistics = load_json(statistics_fp)
+ for key in ['fold_' + str(i + 1)
+ for i in range(num_folds)] + ['overall']:
+ assert key in cv_statistics
+
+ # check for existence and structure of split indices file
+ assert os.path.isfile(indices_fp)
+
+ # check for required keys
+ cv_indices = load_json(indices_fp)
+ for key in ['fold_' + str(i + 1) for i in range(num_folds)]:
+ assert key in cv_indices
| k-fold cross validation for training models with CLI
**Describe the use case**
Training multiple models with shuffled training data (k-folds) can reveal information about our data sets. For example, one fold may be much less accurate than the other folds, telling us that we might need to increase the total number of samples.
**Describe the solution you'd like**
For the CLI, it would be useful if we could have a parameter to train with k-fold cross validation.
**Describe alternatives you've considered**
k-fold can be achieved with the Python API, but it is not as convenient as the CLI.
**Additional context**
I can provide the sample code I've been using for the API.
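For reference, a minimal sketch of what the API-based alternative mentioned above could look like (the feature names and model definition here are placeholders, not the actual code referred to):
```
import pandas as pd
from sklearn.model_selection import KFold
from ludwig.api import LudwigModel

model_definition = {
    'input_features': [{'name': 'x1', 'type': 'numerical'}],
    'output_features': [{'name': 'y', 'type': 'binary'}]
}

df = pd.read_csv('train.csv')
kf = KFold(n_splits=5, shuffle=True, random_state=42)

fold_stats = []
for fold_num, (train_idx, test_idx) in enumerate(kf.split(df), start=1):
    model = LudwigModel(model_definition=model_definition)
    # train on k-1 folds
    train_stats = model.train(data_df=df.iloc[train_idx])
    # score on the held-out fold (see the API docs for the return structure)
    test_results = model.test(data_df=df.iloc[test_idx])
    fold_stats.append((fold_num, train_stats, test_results))
```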
| @danielduckworth Thanks for the suggestion. This is a good idea.
Feel free to create a pr if you have time to work on it :)
@msaisumanth I'll work on it with my team and submit something in the next few weeks. We have already benefited greatly from Ludwig so happy to contribute.
Thank you so much @danielduckworth. I would suggest you do a draft minimal non-working PR so that we can give you guidance on the design first, so that when you end up implementing it, it's more likely there won't be any misalignment.
Hello @w4nderlust, I am working with @danielduckworth on this k-fold contribution. I want to know what you expect as the structure of the minimal draft here. Should I write it as a standalone function that will be imported, or should it be added as an extra parameter in the full_train() function, with the parameter added in the cli() function? Sorry if I am asking something obvious, but this is my first time contributing :)
@the-learner Thanks a lot for working on this! I think what @w4nderlust was suggesting was to outline the high level flow so that we're all on the same page as to how to implement it.
You can write the logic in full_train first and then adding the cli stuff should be fairly easy. So please tell us how you would implement this in full_train at a high level and then you can start coding. If you think it's straightforward, go right ahead and we can review the code.
@msaisumanth The change seems very straightforward, but I will describe the high-level approach anyway.
The k-fold k value will be taken as input by full_train(). There will be k different combinations of data, and based on those data combinations the training will be done. I might not have to change much functionality in the main training function either.
@the-learner there are a bunch of additional considerations: if you have more than one split, the HDF5 and JSON files will be saved for all the splits? Will they be skipped? What about the results? Will you have k different results directories or just one containing all 3 of them? And what about aggregate results (with mean and standard deviation)?
@w4nderlust That was indeed a question on my mind for some time. I think the function of k-fold splits should be to let the user know that the data size needs to be increased if the accuracy values difference will be considerably large. Because saving aggregate results, or most accurate results will just be an accuracy hack. I think it will just store models and final results for one of the splits. Functionality of k-fold can be to inform user about the nature of data. So the results on the test data will be displayed for the k-splits but final model should just be one as it is now.
I've discussed this with @the-learner and I think what makes the most sense is to stay in keeping with the way ludwig already works. So that means storing separate HDF5 and JSON files for each split.
The results of each k-fold would also be saved separately so they can be reported for each fold and the mean calculated and reported.
I think the final step would then be to train using all the data (no split) as the final model. The user can then decide on the basis of the k-fold results whether they want to use the final model.
I'm not sure about the convention for the directories. Do you have any preference @w4nderlust?
This makes sense. A few more considerations:
- in case the k fold is specified I would train just k models and not a final model on all the splits, as that can always be done anyway by calling ludwig again without the split parameter
- when ludwig creates the HDF5 and JSON files, they are placed in the same directory of the original data with the same name, so in order to make the k fold work, you will probably have to either name the files created for each fold differently (there's no way of doing it at the moment) or not save HDF5 and JSON at all
- code-wise, the way I'm thinking about this is a `kfold_cross_validate()` function in `train.py` that calls `full_train()` `k` times. In the main `cli()` function there would be an if that checks whether a `kfold` command line parameter is specified: if not, `full_train` will be called, otherwise `kfold_cross_validate` will be called. But I'm also open to other designs if you think there's a better way.
- after all the k fold models have been trained and the results have been obtained, I would create an additional JSON file containing the k-fold summarized statistics (something like `kfold_training_statistics.json`) so that we could later also build visualizations, like violin plots or the like
- as the different k fold training processes are entirely independent of each other, it would be interesting to find a way to run them in parallel on separate processes. The problem I see here is the fact that, in particular when using GPUs, TensorFlow tries to grab all the resources possible, so running multiple things on the same GPU is kinda tricky, you would have to reduce the percentage of ram used for each process for instance, but if you have as many GPUs as folds, that would be pretty easy to do. Anyway, this could be done also later on, but I wanted to put it here as I believe it would be pretty useful. | 2020-01-09T02:43:48 |
ludwig-ai/ludwig | 626 | ludwig-ai__ludwig-626 | [
"620"
] | 72f408c0a5baee8d51c3ad561c0e3631991d4e0f | diff --git a/ludwig/utils/visualization_utils.py b/ludwig/utils/visualization_utils.py
--- a/ludwig/utils/visualization_utils.py
+++ b/ludwig/utils/visualization_utils.py
@@ -849,7 +849,7 @@ def calibration_plot(
# sns.tsplot(mean_predicted_values[i], fraction_positives[i], ax=ax1, color=colors[i])
assert len(mean_predicted_values[i]) == len(fraction_positives[i])
- order = min(3, len(mean_predicted_values[i] - 1))
+ order = min(3, len(mean_predicted_values[i]) - 1)
sns.regplot(mean_predicted_values[i], fraction_positives[i],
order=order, x_estimator=np.mean, color=colors[i],
| diff --git a/tests/integration_tests/test_visualization_api.py b/tests/integration_tests/test_visualization_api.py
--- a/tests/integration_tests/test_visualization_api.py
+++ b/tests/integration_tests/test_visualization_api.py
@@ -82,7 +82,8 @@ def __init__(self, csv_filename):
# probabilities need to be list of lists containing each row data
# from the probability columns
# ref: https://uber.github.io/ludwig/api/#test - Return
- self.probability = self.test_stats_full[0].iloc[:, 2:].values
+ num_probs = self.output_features[0]['vocab_size']
+ self.probability = self.test_stats_full[0].iloc[:, 1:(num_probs+2)].values
self.ground_truth_metadata = self.model.train_set_metadata
target_predictions = test_df[self.output_feature_name]
self.ground_truth = np.asarray([
| bug: test_visualization.py::test_visualization_calibration_1_vs_all_output_saved fails with IndexError in seaborn regplot() function
**Describe the bug**
Unit test
```
tests/
integration_tests/
test_visualization.py::test_visualization_calibration_1_vs_all_output_saved
```
fails with `IndexError` exception in the travis ci run and locally for me in a docker container:
```
for command, viz_pattern in zip(commands, vis_patterns):
result = subprocess.run(command)
figure_cnt = glob.glob(viz_pattern)
> assert 0 == result.returncode
E AssertionError: assert 0 == 1
E + where 1 = CompletedProcess(args=['python', '-m', 'ludwig.visualize', '--visualization', 'calibration_1_vs_all', '--metrics', 'ac...robabilities.npy', '--model_names', 'Model1', 'Model2', '--top_k', '6', '-od', 'results/experiment_run'], returncode=1).returncode
tests/integration_tests/test_visualization.py:1581: AssertionError
----------------------------- Captured stderr call -----------------------------
Traceback (most recent call last):
File "/opt/python/3.6.7/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/python/3.6.7/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 3265, in <module>
cli(sys.argv[1:])
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 3260, in cli
vis_func(**vars(args))
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 623, in calibration_1_vs_all_cli
calibration_1_vs_all(probabilities_per_model, gt, **kwargs)
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 2654, in calibration_1_vs_all
filename=filename
File "/home/travis/build/uber/ludwig/ludwig/utils/visualization_utils.py", line 856, in calibration_plot
algorithm_names) else '')
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 810, in regplot
x_jitter, y_jitter, color, label)
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 114, in __init__
self.dropna("x", "y", "units", "x_partial", "y_partial")
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 66, in dropna
setattr(self, var, val[not_na])
IndexError: too many indices for array
```
**To Reproduce**
Steps to reproduce the behavior:
Any PR submitted after 24Jan2020 should exhibit the problem
**Expected behavior**
Unit test should not fail
**Screenshots**
See above
**Environment (please complete the following information):**
Failure occurs in two environments. It fails in my local development environment.
- OS: MacOS 10.15.2
- Docker Desktop for Mac 2.2.0.0
- Python version: 3.6.8
- Ludwig version: 2.1
Failure also occurs in the [travis-ci environment](https://travis-ci.org/uber/ludwig/builds/642189471?utm_source=github_status&utm_medium=notification)
**Additional context**
After staring at this issue for a few days, it appears the error may be related to the version of the `seaborn` package. When the unit test is run using `seaborn 0.9.0`, the test completes successfully. However, when `seaborn 0.10.0` is installed, the test fails with the cited error message. I've confirmed this observation through local testing, i.e.,
* `seaborn 0.9.0` => unit test completes successfully
* `seaborn 0.10.0` => unit test fails
Note: unit test also fails with `seaborn 0.9.1`
`seaborn 0.10.0` was released on PyPi on 2020-01-24. `seaborn 0.9.0` was released 2018-07-16. Prior to mid-January 2020, I did not encounter any issues in running the `pytest` suite of tests.
`seaborn 0.10.0` [release notes](https://seaborn.pydata.org/whatsnew.html) indicate changes were made to the `regplot()` function, which is the one failing in ludwig's unit test. I can't tell if the issue we are seeing is due to a breaking API change in `seaborn` 0.10.0 or a bug in the 0.10.0 code base. More analysis is required.
Since the requirements file used to build the travis-ci environment specifies `seaborn>=0.7`, this means `pip install` will use the most recent version of seaborn greater than 0.7. This results in installing `seaborn 0.10.0`. The implication is that any PR submitted from this point on will fail in the cited unit test.
A possible short-term work-around for the travis-ci run failures is to update `requirements_viz.txt` to replace the current `seaborn` specification from `seaborn>=0.7` with `seaborn>=0.7,<=0.9.0`. If this is acceptable, I'll submit a PR to effect the change.
| I doubt there'll be any repercussions to changing to seaborn <= 0.9.0. So it should be okay. @w4nderlust
I think it is worth fixing the specific function call and testing seaborn 0.10; we don't want to stay hooked to older versions.
@msaisumanth @w4nderlust
Initially I did try to figure out how to fix the function call, but I got lost in understanding some of the required internal data structures. I'm still willing to try and fix the function call to support `seaborn 0.10.0`. I may need some help with questions about the data structures for the 1_vs_all plot.
While I work on the fix to support `seaborn 0.10.0`, I'll submit the PR to cap the version of `seaborn` installed for the travis-ci to 0.9.0. This way folks should get a clean run when submitting a PR for the near-term.
Once the `seaborn 0.10.0` issue is fixed, the PR will back out the '<=0.9.0' cap on the `seaborn` version.
Makes sense @jimthompson5802 , thank you!
I actually added it myself. I'll keep the issue open anyway, because I prefer to adapt to whatever API change seaborn did in 0.10.0 as a more sustainable solution moving forward.
OK. I'll focus on the long-term fix to support `seaborn 0.10.0`.
I've been working on this a bit, made some discoveries and pushed some code: https://github.com/uber/ludwig/commit/997c6bc02dffcc726efda7ddbb5978e425ffab96
The first discovery is that the number of classes was wrong, so I fixed it.
The second discovery is that there was no way to tell the plot function to draw a line, so now I set it to check how many points are actually there and set the order accordingly.
Third discovery: `calibration_curve` returns lists of whatever length it wants even if you specify a specific number of bins, so now I add a (0, 0) point at the beginning so that there's at least one point.
Fourth discovery: the API version of the test passes, but the CLI version does not, so there's something fishy there.
I wasn't able to fix the problem anyway; it goes deep into seaborn's linear algebra functions and it's pretty hard to figure out, to be honest, because of a pretty arcane error:
```
ValueError: On entry to DLASCL parameter number 4 had an illegal value
```
It is probably related to the fact that there are NaNs in the matrix to invert, but I'm not 100% sure how NaNs end up there in the CLI case and not in the API case.
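For anyone following along, the pieces involved look roughly like this (synthetic data, not the failing test itself; the (0, 0) point and the order cap mirror the discoveries above):
```
import numpy as np
import seaborn as sns
from sklearn.calibration import calibration_curve

y_true = np.random.randint(0, 2, size=200)
y_prob = np.random.rand(200)

# calibration_curve can return fewer points than n_bins when bins are empty
fraction_positives, mean_predicted = calibration_curve(y_true, y_prob, n_bins=21)

# prepend a (0, 0) point and cap the polynomial order at #points - 1
fraction_positives = np.insert(fraction_positives, 0, 0.0)
mean_predicted = np.insert(mean_predicted, 0, 0.0)
order = min(3, len(mean_predicted) - 1)

sns.regplot(x=mean_predicted, y=fraction_positives,
            order=order, x_estimator=np.mean)
```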
I pulled down your commit `997cbc` and can reproduce the symptoms. Right now I'm focusing on understanding data flows to `seaborn`'s `reg_plot()` function.
@w4nderlust I instrumented the function `calibration_1_vs_all()` in `ludwig/visualize.py`.
Re: your observation that the api test succeeds but the cli version fails with this error `ValueError: On entry to DLASCL parameter number 4 had an illegal value`.
From what I can tell the api and cli tests create the same synthetic data set for the tests.
In the case of the **cli** test, the probabilities passed to the `calibration_1_vs_all()` function are:
<img width="443" alt="probablities_vis_cli_fails" src="https://user-images.githubusercontent.com/1425269/73410633-d9078180-42d0-11ea-85d5-1b6f835295ad.png">
In the case of the **api** test, the probabilities passed to `calibration_1_vs_all()` are:
<img width="458" alt="probabilities_vis_api_works" src="https://user-images.githubusercontent.com/1425269/73410674-f4728c80-42d0-11ea-8a31-b6c70db1f433.png">
I was expecting the structure of the probabilities matrix to be similar, but they seem to be different. What should the structure of the probabilities matrix be? Since the api version works, I'm thinking the first two columns contain the predicted probability for that particular class. The 3rd column could be the probability for the predicted class.
Does this observation point to a direction to resolve this issue?
The structure is the same, although the distributions are very different. In the second case there's something weird as they don't look like probabilities, i.e. the rows don't sum up to 1... But that's the one that works surprisingly enough.
One potential weak point is `calibration_curve`: sometimes it returns vectors of size 21 as requested (the number of bins), sometimes it doesn't, so maybe that function is doing something weird. Also, it has a normalize parameter that, if turned on, gives some errors. Maybe this is a good starting point for investigation.
Finally, it seems like even with the modified version of seaborn there's still one failing test case. Will look into it.
I understand each row in the probabilities matrix should sum to one. Although the **cli** version fails, it conforms to this requirement (within rounding). The **api** version, which does not fail, does not conform to this requirement.
From what I can see, the sample training data created for both the **api** and **cli** versions uses a two-category output response variable:
```
output_features = [category_feature(vocab_size=2, reduce_input='sum')]
```
Excerpt from `description.json`:
```
"output_features": [
{
"dependencies": [],
"embedding_size": 5,
"idx2str": [
"pvfdrgcrcl",
"ICznO"
],
"preprocessing": {
"category": {
"fill_value": "<UNK>",
"lowercase": false,
"missing_value_strategy": "fill_with_const",
"most_common": 10000
},
```
I'm now trying to understand what probabilities are represented in each of the three columns. If the output variable has two valid values, is the third probability--column 0--for the `<UNK>` missing value?
<img width="443" alt="probablities_vis_cli_fails" src="https://user-images.githubusercontent.com/1425269/73445590-02ec9280-4329-11ea-8759-177a89035f5e.png">
Assuming the three probabilities are as I described above, is the expected output the following, one set of charts for each of the three columns in the probability matrix?
<img width="413" alt="Screen Shot 2020-01-30 at 06 44 36" src="https://user-images.githubusercontent.com/1425269/73447142-2107c200-432c-11ea-9dec-785a89dcecb6.png">
@w4nderlust I have an explanation for this issue you pointed out re: the **api** test:
> In the second case there's something weird as they don't look like probabilities, i.e. the rows don't sum up to 1... But that's the one that works surprisingly enough.
I believe the issue is due to an error in the `Experiment` class in `test_visualization_api.py` in this statement (line 85)
```
self.probability = self.test_stats_full[0].iloc[:, 2:].values
```
<img width="721" alt="Experiment_code_collect_probabilities" src="https://user-images.githubusercontent.com/1425269/73505001-32d67d00-439f-11ea-8717-0cd6ea27b67d.png">
This is the `self.test_stats_full[0]` data structure. The `.iloc[:,2:]` refers to the last three columns in the data structure.
<img width="1149" alt="test_set_probabilities" src="https://user-images.githubusercontent.com/1425269/73505056-5ef1fe00-439f-11ea-8aff-1b044c1cd4ac.png">
For this particular case, I believe the correct specification should have been `.iloc[:, 1:4]`, which refers to the 2nd, 3rd and 4th columns in the data structure. From eyeballing the numbers, these three numbers add up to 1, within rounding.
Unless I hear otherwise, I'll make the change to refer to the correct columns. I'll make sure that the modification handles any number of probabilities and is not hard-coded to three columns.
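To make the intent concrete, a small pandas sketch of the idea (the column names and layout here are assumptions for illustration: a predictions column followed by one probability column per class):
```
import pandas as pd

df = pd.DataFrame({
    'y_predictions': ['a', 'b'],
    'y_probabilities_<UNK>': [0.1, 0.2],
    'y_probabilities_a': [0.6, 0.3],
    'y_probabilities_b': [0.3, 0.5],
    'y_probability': [0.6, 0.5],
})

num_probs = 3  # number of per-class probability columns, including <UNK>
# select only the per-class probability columns instead of a hard-coded range
probability = df.iloc[:, 1:num_probs + 1].values
assert abs(probability.sum(axis=1) - 1.0).max() < 1e-6
```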
After making the change described above, the probabilities for the **api** test case now have the correct structure, i.e., the rows sum to one.
<img width="480" alt="Screen Shot 2020-01-30 at 21 08 08" src="https://user-images.githubusercontent.com/1425269/73506828-e130f100-43a4-11ea-9616-eed99803eabb.png">
With this change the **api** unit test now fails just like the **cli** unit test.
I think this is progress because there is now a consistent error. :-) | 2020-02-01T18:21:11 |
ludwig-ai/ludwig | 829 | ludwig-ai__ludwig-829 | [
"828"
] | b31ce876f6cd364471f7154740485656ab4ea6c9 | diff --git a/ludwig/hyperopt_cli.py b/ludwig/hyperopt_cli.py
--- a/ludwig/hyperopt_cli.py
+++ b/ludwig/hyperopt_cli.py
@@ -471,11 +471,11 @@ def cli(sys_argv):
help="list of gpus to use"
)
parser.add_argument(
- "-gf",
- "--gpu_fraction",
- type=float,
- default=1.0,
- help="fraction of gpu memory to initialize the process with",
+ '-gml',
+ '--gpu_memory_limit',
+ type=int,
+ default=None,
+ help='maximum memory in MB to allocate per GPU device'
)
parser.add_argument(
"-uh",
diff --git a/ludwig/utils/tf_utils.py b/ludwig/utils/tf_utils.py
--- a/ludwig/utils/tf_utils.py
+++ b/ludwig/utils/tf_utils.py
@@ -84,27 +84,26 @@ def initialize_tensorflow(gpus=None,
gpus = gpus.strip()
gpus = [int(g) for g in gpus.split(",")]
- if gpus:
- if len(gpus) == 1 and gpus[0] == -1:
- # CUDA_VISIBLE_DEVICES syntax for disabling all GPUs
- tf.config.set_visible_devices([], 'GPU')
- else:
- # Allow memory growth and set memory limit. Regardless of whether we do this
- # before or after setting visible devices, TensorFlow will allocate a small
- # amount of memory per device.
- for gpu in gpu_devices:
- tf.config.experimental.set_memory_growth(gpu, True)
- if gpu_memory_limit is not None:
- tf.config.set_logical_device_configuration(
- gpu,
- [tf.config.LogicalDeviceConfiguration(
- memory_limit=gpu_memory_limit)])
-
- # Set visible devices so GPU utilization is isolated
- # (no GPU contention between workers).
- if gpu_devices:
- local_devices = [gpu_devices[g] for g in gpus]
- tf.config.set_visible_devices(local_devices, 'GPU')
+ if gpus and len(gpus) == 1 and gpus[0] == -1:
+ # CUDA_VISIBLE_DEVICES syntax for disabling all GPUs
+ tf.config.set_visible_devices([], 'GPU')
+ else:
+ # Allow memory growth and set memory limit. Regardless of whether we do this
+ # before or after setting visible devices, TensorFlow will allocate a small
+ # amount of memory per device.
+ for gpu in gpu_devices:
+ tf.config.experimental.set_memory_growth(gpu, True)
+ if gpu_memory_limit is not None:
+ tf.config.set_logical_device_configuration(
+ gpu,
+ [tf.config.LogicalDeviceConfiguration(
+ memory_limit=gpu_memory_limit)])
+
+ # Set visible devices so GPU utilization is isolated
+ # (no GPU contention between workers).
+ if gpus and gpu_devices:
+ local_devices = [gpu_devices[g] for g in gpus]
+ tf.config.set_visible_devices(local_devices, 'GPU')
_set_tf_init_params(param_tuple)
| gpu_memory_limit only works if gpus parameter is specified
I'm trying to use the new gpu_memory_limit parameter introduced with TF2, as opposed to the gpu_fraction one.
One thing I noticed is that it only works if `--gpus` is also specified, as per
https://github.com/uber/ludwig/blob/b31ce876f6cd364471f7154740485656ab4ea6c9/ludwig/utils/tf_utils.py#L87-L107
Once you specify both `--gpus 0 --gpu_memory_limit 4000` (for example), the allocated GPU memory is correctly capped at 4GB. Without the `--gpus 0` bit, all available memory is allocated, as if no `--gpu_memory_limit 4000` had been specified.
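For reference, a standalone TF2 snippet that applies the cap to every physical GPU before any visible-device selection (4000 MB is just the example value from above); it has to run before TensorFlow initializes the devices:
```
import tensorflow as tf

for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
    tf.config.set_logical_device_configuration(
        gpu,
        [tf.config.LogicalDeviceConfiguration(memory_limit=4000)])
```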
One more thing is that Hyperopt still references the "old" `--gpu_fraction`
https://github.com/uber/ludwig/blob/b31ce876f6cd364471f7154740485656ab4ea6c9/ludwig/hyperopt_cli.py#L473-L479
| Hey @carlogrisetti, thanks for reporting this issue. I'll take a look at this today. | 2020-08-16T16:53:08 |
|
ludwig-ai/ludwig | 861 | ludwig-ai__ludwig-861 | [
"833"
] | 79ee5b1f2d34e144feff8a0f2e12c139387146ae | diff --git a/ludwig/experiment.py b/ludwig/experiment.py
--- a/ludwig/experiment.py
+++ b/ludwig/experiment.py
@@ -32,12 +32,14 @@
from ludwig.contrib import contrib_command, contrib_import
from ludwig.data.postprocessing import postprocess
from ludwig.globals import LUDWIG_VERSION, set_on_master, is_on_master
+from ludwig.models.trainer import Trainer
from ludwig.predict import predict
from ludwig.predict import print_test_results
from ludwig.predict import save_prediction_outputs
from ludwig.predict import save_test_statistics
from ludwig.train import full_train
-from ludwig.utils.data_utils import save_json, generate_kfold_splits
+from ludwig.utils.data_utils import save_json, generate_kfold_splits, \
+ is_model_dir
from ludwig.utils.defaults import default_random_seed, merge_with_defaults
from ludwig.utils.print_utils import logging_level_registry
from ludwig.utils.print_utils import print_ludwig
@@ -136,6 +138,15 @@ def experiment(
else:
batch_size = model_definition[TRAINING]['batch_size']
+ # if a model was saved on disk, reload it
+ model_dir = os.path.join(experiment_dir_name, 'model')
+ if is_model_dir(model_dir):
+ model = Trainer.load(model_dir,
+ use_horovod=use_horovod,
+ gpus=gpus,
+ gpu_memory_limit=gpu_memory_limit,
+ allow_parallel_threads=allow_parallel_threads)
+
# predict
test_results = predict(
test_set,
diff --git a/ludwig/utils/data_utils.py b/ludwig/utils/data_utils.py
--- a/ludwig/utils/data_utils.py
+++ b/ludwig/utils/data_utils.py
@@ -31,6 +31,8 @@
from sklearn.model_selection import KFold
from ludwig.constants import SPLIT
+from ludwig.globals import MODEL_HYPERPARAMETERS_FILE_NAME, \
+ TRAIN_SET_METADATA_FILE_NAME, MODEL_WEIGHTS_FILE_NAME
logger = logging.getLogger(__name__)
@@ -473,3 +475,19 @@ def get_path_size(
def clear_data_cache():
"""Clears any cached data objects (e.g., embeddings)"""
load_glove.cache_clear()
+
+
+def is_model_dir(path: str) -> bool:
+ hyperparameters_fn = os.path.join(path, MODEL_HYPERPARAMETERS_FILE_NAME)
+ ts_metadata_fn = os.path.join(path, TRAIN_SET_METADATA_FILE_NAME)
+ is_model_dir = False
+ if (os.path.isdir(path)
+ and os.path.isfile(hyperparameters_fn)
+ and os.path.isfile(ts_metadata_fn)):
+ weights_files_count = 0
+ for file_name in os.listdir(path):
+ if file_name.startswith(MODEL_WEIGHTS_FILE_NAME):
+ weights_files_count += 1
+ if weights_files_count >= 2:
+ is_model_dir = True
+ return is_model_dir
| [bug] test_statistics calculates for the last epoch
https://github.com/uber/ludwig/blob/d70bc4aff35e6ac3c4a17a828e080e703e33bce6/ludwig/experiment.py#L388
The issue here is that I see test/validation statistics for the last epoch, not for the last improved one.
Is that a bug or a feature?
| Let me look into it. Also, that part of the code will undergo quite some substantial reworking before v0.3 exits the alpha stage anyway, so it is great that if this is an error we are catching it now.
After looking into this I confirm that the test stats are computed using the model at the last epoch.
We'll fix this in the ongoing refactoring.
As a temporary workaround, I suggest first training, then loading the trained model from disk (which is the best one on validation), and then performing the eval/test.
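A rough sketch of that workaround with the Python API (config, paths and method names follow the v0.3-style API and are illustrative assumptions, not the exact code):
```
from ludwig.api import LudwigModel

config = {
    'input_features': [{'name': 'x1', 'type': 'numerical'}],
    'output_features': [{'name': 'y', 'type': 'binary'}]
}

model = LudwigModel(config)
model.train(dataset='train.csv', output_directory='results')

# reload the checkpoint saved at the best validation epoch and evaluate that,
# instead of the in-memory model from the last epoch
best_model = LudwigModel.load('results/api_experiment_run/model')
eval_stats = best_model.evaluate(dataset='test.csv')
```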
@w4nderlust yeah, we do this anyway, but noticed weird test_statistics one time. Good to know it's a bug :) | 2020-09-03T01:38:28 |
|
ludwig-ai/ludwig | 897 | ludwig-ai__ludwig-897 | [
"819"
] | ef5b02879efe7993ad97969958238ff76cb57bdf | diff --git a/ludwig/models/trainer.py b/ludwig/models/trainer.py
--- a/ludwig/models/trainer.py
+++ b/ludwig/models/trainer.py
@@ -56,8 +56,6 @@
logger = logging.getLogger(__name__)
-tf.config.experimental_run_functions_eagerly(True)
-
class Trainer:
"""
| TF2 is slower than TF1, improve speed
https://github.com/tensorflow/tensorflow/issues/33487
Getting the same result: epochs became longer because of switching to TF2.
I noticed also that it's using less memory than TF1, but slower epochs are killing this advantage.
TF 2.3 – less epoch time, but still slow.
Looks like there are some issues with `experimental_run_functions_eagerly`.
Very disappointed. Going to switch back to ludwig 0.2.2.8
| There are a couple of optimizations we'll implement before releasing v0.3 that will hopefully improve speed (using Inputs for "compiling" the graph beforehand and disabling running functions eagerly). Will keep you posted. Although this is not really a Ludwig bug, as you can understand.
@tgaddair FYI. We may want to use `experimental_run_functions_eagerly` only when `debug=True` for instance, but need to test how much it actually helps.
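In TF2 terms that toggle would look something like this (just a sketch of the idea, not the final implementation):
```
import tensorflow as tf

def initialize_tf(debug=False):
    # run @tf.function-compiled code eagerly only while debugging;
    # otherwise let TensorFlow build and reuse the graph for speed
    tf.config.experimental_run_functions_eagerly(debug)
```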
Hey @ifokeev @w4nderlust, my guess at this point is that not enough of the code is being compiled into a graph, and is instead running eagerly. I will make it a priority to dig into this before the v0.3.0 release. To help with repro, is there a particular dataset / config you're running with that's showing this behavior?
Hey @tgaddair ,
> To help with repro, is there a particular dataset / config you're running with that's showing this behavior?
I see @jimthompson5802 and you merged the fix, so the dataset is not needed anymore.
Thank you for the fix. Will try soon.
Thanks @ifokeev. We'll wait to close the issue until we have confirmation that performance is where it should be for your model. | 2020-09-19T01:24:12 |
|
ludwig-ai/ludwig | 982 | ludwig-ai__ludwig-982 | [
"839"
] | bddbc4ff340cb5194b12cf9c0fc908b159826af5 | diff --git a/ludwig/models/trainer.py b/ludwig/models/trainer.py
--- a/ludwig/models/trainer.py
+++ b/ludwig/models/trainer.py
@@ -67,6 +67,7 @@ def __init__(
decay=False,
decay_rate=0.96,
decay_steps=10000,
+ staircase=False,
batch_size=128,
eval_batch_size=0,
bucketing_field=None,
@@ -187,6 +188,7 @@ def __init__(
self.decay = decay
self.decay_rate = decay_rate
self.decay_steps = decay_steps
+ self.staircase = staircase
self.batch_size = batch_size
self.eval_batch_size = batch_size if eval_batch_size < 1 else eval_batch_size
self.bucketing_field = bucketing_field
@@ -231,7 +233,6 @@ def write_epoch_summary(
summary_writer,
metrics,
step,
- learning_rate=None
):
if not summary_writer:
return
@@ -244,9 +245,6 @@ def write_epoch_summary(
)
metric_val = output_feature[metric][-1]
tf.summary.scalar(metric_tag, metric_val, step=step)
- if learning_rate:
- tf.summary.scalar("combined/epoch_learning_rate",
- learning_rate, step=step)
summary_writer.flush()
@classmethod
@@ -255,7 +253,8 @@ def write_step_summary(
train_summary_writer,
combined_loss,
all_losses,
- step
+ step,
+ learning_rate=None
):
if not train_summary_writer:
return
@@ -270,6 +269,10 @@ def write_step_summary(
loss_tag = "{}/step_training_loss".format(feature_name)
tf.summary.scalar(loss_tag, loss, step=step)
+ if learning_rate:
+ tf.summary.scalar("combined/step_learning_rate",
+ learning_rate, step=step)
+
train_summary_writer.flush()
def train(
@@ -476,7 +479,7 @@ def train(
digits=digits_per_epochs
)
)
- current_learning_rate = progress_tracker.learning_rate
+
# needed because batch size may change
batcher.batch_size = progress_tracker.batch_size
@@ -495,6 +498,39 @@ def train(
# training step loop
while not batcher.last_batch():
+
+ # Set learning rate for this batch
+ current_learning_rate = progress_tracker.learning_rate
+
+ if self.decay:
+ current_learning_rate = exponential_decay(
+ current_learning_rate,
+ self.decay_rate,
+ self.decay_steps,
+ progress_tracker.steps,
+ self.staircase
+ )
+
+ if self.horovod:
+ current_learning_rate = learning_rate_warmup_distributed(
+ current_learning_rate,
+ progress_tracker.epoch,
+ self.learning_rate_warmup_epochs,
+ self.horovod.size(),
+ batcher.step,
+ batcher.steps_per_epoch
+ ) * self.horovod.size()
+ else:
+ current_learning_rate = learning_rate_warmup(
+ current_learning_rate,
+ progress_tracker.epoch,
+ self.learning_rate_warmup_epochs,
+ batcher.step,
+ batcher.steps_per_epoch
+ )
+ self.optimizer.set_learning_rate(current_learning_rate)
+
+ # obtain batch
batch = batcher.next_batch()
inputs = {
i_feat.feature_name: batch[i_feat.feature_name]
@@ -531,6 +567,7 @@ def train(
combined_loss=loss,
all_losses=all_losses,
step=progress_tracker.steps,
+ learning_rate=current_learning_rate,
)
if self.horovod and first_batch:
@@ -545,33 +582,6 @@ def train(
self.horovod.broadcast_variables(
self.optimizer.variables(), root_rank=0)
- if self.decay:
- current_learning_rate = exponential_decay(
- current_learning_rate,
- self.decay_rate,
- self.decay_steps,
- batcher.step
- )
-
- if self.horovod:
- current_learning_rate = learning_rate_warmup_distributed(
- current_learning_rate,
- progress_tracker.epoch,
- self.learning_rate_warmup_epochs,
- self.horovod.size(),
- batcher.step,
- batcher.steps_per_epoch
- ) * self.horovod.size()
- else:
- current_learning_rate = learning_rate_warmup(
- current_learning_rate,
- progress_tracker.epoch,
- self.learning_rate_warmup_epochs,
- batcher.step,
- batcher.steps_per_epoch
- )
- self.optimizer.set_learning_rate(current_learning_rate)
-
progress_tracker.steps += 1
if is_on_master():
progress_bar.update(1)
@@ -606,7 +616,6 @@ def train(
summary_writer=train_summary_writer,
metrics=progress_tracker.train_metrics,
step=progress_tracker.epoch,
- learning_rate=current_learning_rate,
)
if validation_set is not None and validation_set.size > 0:
diff --git a/ludwig/utils/math_utils.py b/ludwig/utils/math_utils.py
--- a/ludwig/utils/math_utils.py
+++ b/ludwig/utils/math_utils.py
@@ -45,8 +45,15 @@ def convert_size(size_bytes):
return '{} {}'.format(s, size_name[i])
-def exponential_decay(initial_learning_rate, decay_rate, decay_steps, step):
- return initial_learning_rate * decay_rate ** (float(step) / decay_steps)
+def exponential_decay(initial_learning_rate, decay_rate, decay_steps, step,
+ staircase=False):
+ decay_rate = float(decay_rate)
+ decay_steps = float(decay_steps)
+ step = float(step)
+ exponent = step / decay_steps
+ if staircase:
+ exponent = math.ceil(exponent)
+ return initial_learning_rate * math.pow(decay_rate, exponent)
def learning_rate_warmup_distributed(
@@ -78,7 +85,7 @@ def learning_rate_warmup_distributed(
size
lr'(epoch = warmup) = lr
"""
- if epoch > warmup_epochs:
+ if epoch >= warmup_epochs:
return learning_rate
else:
epoch_adjusted = float(epoch) + (curr_step / steps_per_epoch)
@@ -93,19 +100,22 @@ def learning_rate_warmup(
curr_step,
steps_per_epoch
):
- global_curr_step = 1 + curr_step + epoch * steps_per_epoch
- warmup_steps = warmup_epochs * steps_per_epoch
+ if epoch >= warmup_epochs:
+ return learning_rate
+ else:
+ global_curr_step = 1 + curr_step + epoch * steps_per_epoch
+ warmup_steps = warmup_epochs * steps_per_epoch
- warmup_percent_done = global_curr_step / warmup_steps
- warmup_learning_rate = learning_rate * warmup_percent_done
+ warmup_percent_done = global_curr_step / warmup_steps
+ warmup_learning_rate = learning_rate * warmup_percent_done
- is_warmup = int(global_curr_step < warmup_steps)
- interpolated_learning_rate = (
- (1.0 - is_warmup) * learning_rate +
- is_warmup * warmup_learning_rate
- )
+ is_warmup = int(global_curr_step < warmup_steps)
+ interpolated_learning_rate = (
+ (1.0 - is_warmup) * learning_rate +
+ is_warmup * warmup_learning_rate
+ )
- return interpolated_learning_rate
+ return interpolated_learning_rate
def round2precision(val, precision: int = 0, which: str = ''):
| decay, decay_rate and decay_steps not implemented
Using latest master, it seems to me that `decay`, `decay_rate` and `decay_steps` are not affecting the learning rate at all. Looking at the trainer model, they don't even seem to be used in the train function.
https://github.com/uber/ludwig/blob/62430e4a0dd7a4fda08d6dcd615fbdbbf53c5377/ludwig/models/trainer.py#L166-L195
`learning_rate` and `learning_rate_warmup_epochs` instead work fine (and I see them parsed in the train function).
Am I missing something?
Maybe it's related to the TF2 porting?
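For context, the intended behavior of those parameters is an exponential schedule along these lines (decay_rate=0.96 and decay_steps=10000 are the defaults visible in the diff above):
```
initial_lr, decay_rate, decay_steps = 0.001, 0.96, 10000

for step in (0, 10000, 20000, 50000):
    lr = initial_lr * decay_rate ** (step / decay_steps)
    print(step, lr)  # approx. 0.001, 0.00096, 0.0009216, 0.000815
```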
| using `decay` often, need this feature too
I can't really seem to find the point where those parameters were dropped in the commit history... it looks like they were meant to be there, and they were documented as if they worked... but heh... they were never actually implemented :)
@w4nderlust: this would have been obvious if there were an "unknown parameter" warning/error in the yaml parsing, just to reference another use case for https://github.com/uber/ludwig/issues/822#issuecomment-674425054
Only references to `decay` in all the codebase are the default parameters. I can't find any other reference for the parameters in the subject.
This is weird, I have to go back in time to see what happened to these parameters. Thank you for posting this.
Indeed... And it's not TF2 fault, since a year ago they still aren't there.
Thanks
@w4nderlust I noticed that you replaced TF2 with the `master` branch and this affected a lot of commits. Don't know how. When I was switching back to TF1 I needed to use my own branch inside the fork
It was inside the deleted file
<img width="1014" alt="Screenshot 2020-08-22 at 00 25 13" src="https://user-images.githubusercontent.com/2017148/90936522-043cf400-e40e-11ea-82d1-b6340d2d5ab0.png">
This actually helped me remember: in TF1 there was this exponential decay function I was using; in TF2 things work differently. Now I'm adding a todo before the release of v0.3 to re-enable the exponential decay logic. Thank you for noticing this, guys; it's great to release the alpha version and have members of the community spotting these things, it will make v0.3 the most robust version yet :)
#898 solves this. Let me know if it's not ok.
I am finally able to test decay and decay_rate, but it seems that the learning rate reduction due to decay is only applied one time, and then never again
i.e.: I set LR to 0.1, decay to true, decay_rate to 0.9, and LR goes down to 0.09986 after the first epoch and then stays that way forever.
Can you check this? Do you prefer to have a new issue opened?
Thanks
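For reference, with those settings an exponential schedule should keep shrinking the learning rate every step, so a value that stays frozen suggests the schedule stopped advancing. A quick illustration (step counts are only illustrative, with decay_steps left at its default of 10000):
```
lr0, decay_rate, decay_steps = 0.1, 0.9, 10000

for step in (100, 1000, 10000):
    print(step, lr0 * decay_rate ** (step / decay_steps))
# 100   -> approx. 0.09989
# 1000  -> approx. 0.09895
# 10000 -> approx. 0.09
```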
So this is kinda weird. Inspecting this, I figured out some issues with the way the learning rate is computed. I'm fixing them, but yours is different. Also, I'm changing the reporting of the learning rate to per batch, as the decay (and the warm up) are applied on a per-batch basis.
Will probably put up a PR soon for you to try. | 2020-10-29T01:17:31 |
|
ludwig-ai/ludwig | 1,011 | ludwig-ai__ludwig-1011 | [
"1009"
] | 64490b4cb7a378c1e9d09d0b0e12f2e11e3c20dc | diff --git a/ludwig/api.py b/ludwig/api.py
--- a/ludwig/api.py
+++ b/ludwig/api.py
@@ -1292,7 +1292,11 @@ def load(
```
"""
+ # Initialize Horovod and TensorFlow before calling `broadcast()` to prevent initializing
+ # TensorFlow with default parameters
horovod = configure_horovod(use_horovod)
+ initialize_tensorflow(gpus, gpu_memory_limit, allow_parallel_threads, horovod)
+
config = broadcast_return(lambda: load_json(os.path.join(
model_dir,
MODEL_HYPERPARAMETERS_FILE_NAME
diff --git a/ludwig/train.py b/ludwig/train.py
--- a/ludwig/train.py
+++ b/ludwig/train.py
@@ -159,7 +159,14 @@ def train_cli(
config_file)
if model_load_path:
- model = LudwigModel.load(model_load_path)
+ model = LudwigModel.load(
+ model_load_path,
+ logging_level=logging_level,
+ use_horovod=use_horovod,
+ gpus=gpus,
+ gpu_memory_limit=gpu_memory_limit,
+ allow_parallel_threads=allow_parallel_threads,
+ )
else:
model = LudwigModel(
config=config,
| diff --git a/tests/integration_tests/test_cli.py b/tests/integration_tests/test_cli.py
--- a/tests/integration_tests/test_cli.py
+++ b/tests/integration_tests/test_cli.py
@@ -28,13 +28,13 @@
from tests.integration_tests.utils import sequence_feature
-def _run_ludwig(command, **ludwig_kwargs):
- commands = ['ludwig', command]
+def _run_commands(commands, **ludwig_kwargs):
for arg_name, value in ludwig_kwargs.items():
commands += ['--' + arg_name, value]
cmdline = ' '.join(commands)
print(cmdline)
- completed_process = subprocess.run(cmdline, shell=True, encoding='utf-8',
+ completed_process = subprocess.run(cmdline,
+ shell=True,
stdout=subprocess.PIPE,
env=os.environ.copy())
assert completed_process.returncode == 0
@@ -42,6 +42,16 @@ def _run_ludwig(command, **ludwig_kwargs):
return completed_process
+def _run_ludwig(command, **ludwig_kwargs):
+ commands = ['ludwig', command]
+ return _run_commands(commands, **ludwig_kwargs)
+
+
+def _run_ludwig_horovod(command, **ludwig_kwargs):
+ commands = ['horovodrun', '-np', '2', 'ludwig', command]
+ return _run_commands(commands, **ludwig_kwargs)
+
+
def _prepare_data(csv_filename, config_filename):
# Single sequence input, single category output
input_features = [sequence_feature(reduce_output='sum')]
@@ -136,6 +146,33 @@ def test_train_cli_training_set(csv_filename):
output_directory=tmpdir)
+def test_train_cli_horovod(csv_filename):
+ """Test training using `horovodrun -np 2 ludwig train --dataset`."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ config_filename = os.path.join(tmpdir,
+ 'config.yaml')
+ dataset_filename = _prepare_data(csv_filename,
+ config_filename)
+ _run_ludwig_horovod(
+ 'train',
+ dataset=dataset_filename,
+ config_file=config_filename,
+ output_directory=tmpdir,
+ experiment_name='horovod_experiment',
+ )
+
+ # Check that `model_load_path` works correctly
+ _run_ludwig_horovod(
+ 'train',
+ dataset=dataset_filename,
+ config_file=config_filename,
+ output_directory=tmpdir,
+ model_load_path=os.path.join(
+ tmpdir, 'horovod_experiment_run', 'model'
+ ),
+ )
+
+
def test_export_savedmodel_cli(csv_filename):
"""Test exporting Ludwig model to Tensorflows savedmodel format."""
with tempfile.TemporaryDirectory() as tmpdir:
@@ -268,6 +305,7 @@ def test_collect_summary_activations_weights_cli(csv_filename):
'experiment_run',
'model')
)
+ stdout = completed_process.stdout.decode('utf-8')
# parse output of collect_summary to find tensor names to use
# in the collect_wights and collect_activations.
@@ -275,9 +313,9 @@ def test_collect_summary_activations_weights_cli(csv_filename):
# search for substring with layer names
layers = re.search(
"Layers(\w|\d|\:|\/|\n)*Weights",
- completed_process.stdout
+ stdout
)
- substring = completed_process.stdout[layers.start(): layers.end()]
+ substring = stdout[layers.start(): layers.end()]
# extract layer names
layers_list = []
@@ -290,9 +328,9 @@ def test_collect_summary_activations_weights_cli(csv_filename):
# search for substring with weights names
weights = re.search(
"Weights(\w|\d|\:|\/|\n)*",
- completed_process.stdout
+ stdout
)
- substring = completed_process.stdout[weights.start(): weights.end()]
+ substring = stdout[weights.start(): weights.end()]
# extract weights names
weights_list = []
| Preload fails with ludwig
I am using ludwig in CPU-only mode on a multicore system. I run as below:
horovodrun -np 24 -H localhost:24 --binding-args="-bind-to socket -map-by socket" ludwig train --training_set train.csv --validation_set valid.csv --random_seed 42 -cf ${MODELFILE} --model_name ${MODELNAME}
Now I am trying to preload the model file for a different run (all else being the same; in fact, even the exact same data file).
horovodrun -np 24 -H localhost:24 --binding-args="-bind-to socket -map-by socket" ludwig train --training_set train.csv --validation_set valid.csv --random_seed 42 -cf ${MODELFILE} --model_name ${MODELNAME} -mlp results/experiment_train_model/model
I am assuming this is the correct way to run multicore (otherwise ludwig just runs on a single core).
It fails with the error:
```
[1,2]<stderr>: "Intra op parallelism cannot be modified after initialization.")
[1,2]<stderr>:RuntimeError: Intra op parallelism cannot be modified after initialization.
```
Any idea why preload fails?
I will build a reproducer if needed, but want to eliminate trivial issues first in case I am missing something.
- OS: 18.04.1-Ubuntu
- Python version 3.7.6
- Ludwig version
- Tensorflow 2.3.1
| Hey @mangleddata, thanks for reporting this. This does look like a bug on our side when specifying the `model_load_path`. The root of the problem, I suspect, is that we are not plumbing the parameters [here](https://github.com/uber/ludwig/blob/master/ludwig/train.py#L162), resulting in attempts to initialize TensorFlow with different parameters. I'll take a look at this today. | 2020-11-16T19:55:55 |
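For readers hitting the same error: the sketch below is not Ludwig code, only an assumed minimal illustration of the underlying TensorFlow 2.x behavior, namely that thread-parallelism settings become immutable once the runtime has initialized, which is what a second initialization path with different parameters (such as the `model_load_path` flow above) runs into.
```python
# Illustration only (assumed repro, not taken from Ludwig): TensorFlow locks
# its threading configuration after the runtime initializes.
import tensorflow as tf

tf.config.threading.set_intra_op_parallelism_threads(4)  # fine: runtime not started yet
_ = tf.constant(0)  # creating the first op initializes the runtime

try:
    # a second initialization path that passes different parameters fails here
    tf.config.threading.set_intra_op_parallelism_threads(8)
except RuntimeError as err:
    print(err)  # Intra op parallelism cannot be modified after initialization.
```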
ludwig-ai/ludwig | 1,031 | ludwig-ai__ludwig-1031 | [
"1030"
] | 263f3a747a26ec9165a4c45d650a785f5ea7b4cc | diff --git a/ludwig/hyperopt/execution.py b/ludwig/hyperopt/execution.py
--- a/ludwig/hyperopt/execution.py
+++ b/ludwig/hyperopt/execution.py
@@ -7,6 +7,7 @@
from ludwig.constants import *
from ludwig.hyperopt.sampling import HyperoptSampler, \
logger
+from ludwig.modules.metric_modules import get_best_function
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.misc_utils import get_available_gpu_memory, get_from_registry
from ludwig.utils.tf_utils import get_available_gpus_cuda_string
@@ -20,9 +21,64 @@ def __init__(self, hyperopt_sampler: HyperoptSampler,
self.metric = metric
self.split = split
- def get_metric_score(self, eval_stats) -> float:
+ def get_metric_score(self, train_stats, eval_stats) -> float:
+ if (train_stats is not None and
+ self.split in train_stats and
+ VALIDATION in train_stats and # needed otherwise can-t figure
+ # out the best epoch
+ self.output_feature in train_stats[self.split] and
+ self.metric in train_stats[self.split][self.output_feature]):
+ logger.info("Returning metric score from training statistics")
+ return self.get_metric_score_from_train_stats(train_stats)
+ else:
+ logger.info("Returning metric score from eval statistics. "
+ "If skip_save_model is True, eval statistics "
+ "are calculated using the model at the last epoch "
+ "rather than the model at the epoch with "
+ "best validation performance")
+ return self.get_metric_score_from_eval_stats(eval_stats)
+
+ def get_metric_score_from_eval_stats(self, eval_stats) -> float:
+ if '.' in self.metric:
+ metric_parts = self.metric.split('.')
+ stats = eval_stats[self.output_feature]
+ for metric_part in metric_parts:
+ if isinstance(stats, dict):
+ if metric_part in stats:
+ stats = stats[metric_part]
+ else:
+ raise ValueError(
+ f"Evaluation statistics do not contain "
+ f"the metric {self.metric}")
+ else:
+ raise ValueError(f"Evaluation statistics do not contain "
+ f"the metric {self.metric}")
+ if not isinstance(stats, float):
+ raise ValueError(f"The metric {self.metric} in "
+ f"evaluation statistics is not "
+ f"a numerical value: {stats}")
+ return stats
return eval_stats[self.output_feature][self.metric]
+ def get_metric_score_from_train_stats(self, train_stats) -> float:
+ # grab the results of the model with highest validation test performance
+ train_valiset_stats = train_stats[VALIDATION]
+ train_evalset_stats = train_stats[self.split]
+
+ validation_field_result = train_valiset_stats[self.output_feature]
+ best_function = get_best_function(self.metric)
+
+ # results of the model with highest validation test performance
+ epoch_best_vali_metric, best_vali_metric = best_function(
+ enumerate(validation_field_result[self.metric]),
+ key=lambda pair: pair[1]
+ )
+ best_vali_metric_epoch_eval_metric = train_evalset_stats[
+ self.output_feature][self.metric][
+ epoch_best_vali_metric]
+
+ return best_vali_metric_epoch_eval_metric
+
def sort_hyperopt_results(self, hyperopt_results):
return sorted(
hyperopt_results, key=lambda hp_res: hp_res["metric_score"],
@@ -150,7 +206,7 @@ def execute(
random_seed=random_seed,
debug=debug,
)
- metric_score = self.get_metric_score(eval_stats)
+ metric_score = self.get_metric_score(train_stats, eval_stats)
metric_scores.append(metric_score)
hyperopt_results.append(
@@ -200,7 +256,7 @@ def init_worker():
def _run_experiment(self, hyperopt_dict):
parameters = hyperopt_dict["parameters"]
train_stats, eval_stats = run_experiment(**hyperopt_dict)
- metric_score = self.get_metric_score(eval_stats)
+ metric_score = self.get_metric_score(train_stats, eval_stats)
return {
"parameters": parameters,
@@ -216,7 +272,7 @@ def _run_experiment_gpu(self, hyperopt_dict):
hyperopt_dict["gpus"] = gpu_id_meta["gpu_id"]
hyperopt_dict["gpu_memory_limit"] = gpu_id_meta["gpu_memory_limit"]
train_stats, eval_stats = run_experiment(**hyperopt_dict)
- metric_score = self.get_metric_score(eval_stats)
+ metric_score = self.get_metric_score(train_stats, eval_stats)
finally:
self.queue.put(gpu_id_meta)
return {
@@ -565,7 +621,7 @@ def execute(
for stats, parameters in zip(stats_batch, sampled_parameters):
train_stats, eval_stats = stats
- metric_score = self.get_metric_score(eval_stats)
+ metric_score = self.get_metric_score(train_stats, eval_stats)
metric_scores.append(metric_score)
hyperopt_results.append(
diff --git a/ludwig/preprocess.py b/ludwig/preprocess.py
--- a/ludwig/preprocess.py
+++ b/ludwig/preprocess.py
@@ -208,7 +208,7 @@ def cli(sys_argv):
# ----------------
preprocessing_def = parser.add_mutually_exclusive_group(required=True)
preprocessing_def.add_argument(
- '-pd',
+ '-pc',
'--preprocessing_config',
type=yaml.safe_load,
help='preproceesing config. '
| diff --git a/tests/integration_tests/test_hyperopt.py b/tests/integration_tests/test_hyperopt.py
--- a/tests/integration_tests/test_hyperopt.py
+++ b/tests/integration_tests/test_hyperopt.py
@@ -193,4 +193,80 @@ def test_hyperopt_run_hyperopt(csv_filename):
os.path.join('results_hyperopt', 'hyperopt_statistics.json')
)
+def test_hyperopt_executor_get_metric_score():
+ executor = EXECUTORS[0]
+ output_feature = "of_name"
+ split = 'test'
+
+ train_stats = {
+ 'training': {
+ output_feature: {
+ 'loss': [0.58760345, 1.5066891],
+ 'accuracy': [0.6666667, 0.33333334],
+ 'hits_at_k': [1.0, 1.0]
+ },
+ 'combined': {
+ 'loss': [0.58760345, 1.5066891]
+ }
+ },
+ 'validation': {
+ output_feature: {
+ 'loss': [0.30233705, 2.6505466],
+ 'accuracy': [1.0, 0.0],
+ 'hits_at_k': [1.0, 1.0]
+ },
+ 'combined': {
+ 'loss': [0.30233705, 2.6505466]
+ }
+ },
+ 'test': {
+ output_feature: {
+ 'loss': [1.0876318, 1.4353828],
+ 'accuracy': [0.7, 0.5],
+ 'hits_at_k': [1.0, 1.0]
+ },
+ 'combined': {
+ 'loss': [1.0876318, 1.4353828]
+ }
+ }
+ }
+ eval_stats = {
+ output_feature: {
+ 'loss': 1.4353828,
+ 'accuracy': 0.5,
+ 'hits_at_k': 1.0,
+ 'overall_stats': {
+ 'token_accuracy': 1.0,
+ 'avg_precision_macro': 1.0,
+ 'avg_recall_macro': 1.0,
+ 'avg_f1_score_macro': 1.0,
+ 'avg_precision_micro': 1.0,
+ 'avg_recall_micro': 1.0,
+ 'avg_f1_score_micro': 1.0,
+ 'avg_precision_weighted': 1.0,
+ 'avg_recall_weighted': 1.0,
+ 'avg_f1_score_weighted': 1.0,
+ 'kappa_score': 0.6
+ },
+ 'combined': {'loss': 1.4353828}
+ }
+ }
+
+ metric = 'loss'
+ hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
+ None, output_feature, metric, split, **executor)
+ score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
+ assert score == 1.0876318
+
+ metric = 'accuracy'
+ hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
+ None, output_feature, metric, split, **executor)
+ score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
+ assert score == 0.7
+
+ metric = 'overall_stats.kappa_score'
+ hyperopt_executor = get_build_hyperopt_executor(executor["type"])(
+ None, output_feature, metric, split, **executor)
+ score = hyperopt_executor.get_metric_score(train_stats, eval_stats)
+ assert score == 0.6
| Hyperopt metric_score uses last epoch instead of best epoch
`hyperopt()` generates a `hyperopt_statistics.json` file which contains `training_stats` and the `metric_score` from each sample
I'm optimizing for validation accuracy, but I noticed that the final `metric_score` that gets reported in the json file uses the validation accuracy from the last epoch during training rather than the best epoch during training
Below is the output from my `hyperopt_statistics.json` file
```
"hyperopt_results": [
{
"eval_stats": {
"combined": {
"loss": 1.4781785011291504
},
"label": {
"accuracy": 0.47138965129852295, // this is the final reported accuracy
...
},
"metric_score": 0.47138965129852295, // this value comes from HyperoptExecutor.get_metric_score(self, eval_stats) which just copies the value above
"parameters": {
"training.learning_rate": 0.0006019209790229743,
"utterance.cell_type": "gru",
"utterance.num_layers": 1,
"utterance.state_size": 495
},
"training_stats": {
"validation": {
"combined": {
"loss": [
1.3932547569274902,
1.2642898559570312,
1.3837428092956543,
1.2704368829727173,
1.3504513502120972,
1.3695340156555176,
1.6437498331069946,
1.589107632637024,
1.4781785011291504
]
},
"label": {
"accuracy": [
0.40962761640548706,
0.440508633852005,
0.4423251450061798,
0.47320616245269775, // this is the best validation accuracy from training
0.47320616245269775,
0.440508633852005,
0.4523160755634308,
0.40690281987190247,
0.47138965129852295 // this value from the last epoch is what's actually reported above
],
...
```
Is this intended? The logger output from LudwigModel.train() uses the best validation accuracy, so I thought hyperopt would have similar behavior.
If this is a bug I can try to help fix it as I have a general idea of where this behavior comes from in the codebase
**Environment:**
- Run in Google Colab
- Python version 3.6.9
- Ludwig version 0.3.1
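A minimal sketch of the expected behavior (the helper name and dictionary layout are assumptions based on the `training_stats` shown above): pick the epoch with the best validation metric and report the metric from that same epoch rather than from the last one.
```python
# Sketch only: train_stats[split][feature][metric] is assumed to be a per-epoch list.
def metric_score_at_best_epoch(train_stats, feature, metric, split, maximize=True):
    best_fn = max if maximize else min
    best_epoch, _ = best_fn(
        enumerate(train_stats["validation"][feature][metric]),
        key=lambda pair: pair[1],
    )
    return train_stats[split][feature][metric][best_epoch]

# With the numbers above, selecting on validation accuracy returns
# 0.47320616245269775 (epoch index 3) instead of the last-epoch 0.47138965129852295.
```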
| 2020-11-29T02:32:45 |
|
ludwig-ai/ludwig | 1,056 | ludwig-ai__ludwig-1056 | [
"1055"
] | 78584b1ad91a988efea672b2a31b238b8b6df74b | diff --git a/ludwig/utils/data_utils.py b/ludwig/utils/data_utils.py
--- a/ludwig/utils/data_utils.py
+++ b/ludwig/utils/data_utils.py
@@ -136,7 +136,12 @@ def read_jsonl(data_fp, df_lib):
def read_excel(data_fp, df_lib):
- return df_lib.read_excel(data_fp)
+ fp_split = os.path.splitext(data_fp)
+ if fp_split[1] == '.xls':
+ excel_engine = 'xlrd'
+ else:
+ excel_engine = 'openpyxl'
+ return df_lib.read_excel(data_fp, engine=excel_engine)
def read_parquet(data_fp, df_lib):
| diff --git a/tests/integration_tests/test_experiment.py b/tests/integration_tests/test_experiment.py
--- a/tests/integration_tests/test_experiment.py
+++ b/tests/integration_tests/test_experiment.py
@@ -435,7 +435,7 @@ def test_experiment_image_dataset(
DATA_FORMATS_TO_TEST = [
- 'csv', 'df', 'dict', 'excel', 'feather', 'fwf', 'hdf5', 'html',
+ 'csv', 'df', 'dict', 'excel', 'excel_xls', 'feather', 'fwf', 'hdf5', 'html',
'json', 'jsonl', 'parquet', 'pickle', 'stata', 'tsv'
]
@pytest.mark.parametrize('data_format', DATA_FORMATS_TO_TEST)
diff --git a/tests/integration_tests/utils.py b/tests/integration_tests/utils.py
--- a/tests/integration_tests/utils.py
+++ b/tests/integration_tests/utils.py
@@ -540,6 +540,13 @@ def to_fwf(df, fname):
index=False
)
+ elif data_format == 'excel_xls':
+ dataset_to_use = replace_file_extension(raw_data, 'xls')
+ pd.read_csv(raw_data).to_excel(
+ dataset_to_use,
+ index=False
+ )
+
elif data_format == 'feather':
dataset_to_use = replace_file_extension(raw_data, 'feather')
pd.read_csv(raw_data).to_feather(
| Use openpyxl instead of xlrd for reading newer Excel files
With the release of xlrd 2, support for newer Excel formats like xlsx has been dropped. It is recommended to use openpyxl instead and to only use xlrd for legacy Excel formats.
See: https://stackoverflow.com/questions/65254535/xlrd-biffh-xlrderror-excel-xlsx-file-not-supported
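A small illustration of the engine split using the public pandas API (the helper below is only a sketch, not Ludwig's actual reader): keep `xlrd` for legacy `.xls` files and use `openpyxl` for everything newer.
```python
import os

import pandas as pd


def read_excel_any(path):
    # xlrd >= 2.0 can only read the legacy .xls format; use openpyxl otherwise
    engine = "xlrd" if os.path.splitext(path)[1].lower() == ".xls" else "openpyxl"
    return pd.read_excel(path, engine=engine)
```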
| 2020-12-16T04:09:31 |
|
ludwig-ai/ludwig | 1,138 | ludwig-ai__ludwig-1138 | [
"1134"
] | 6bb9c7e187b29c907dc3fdb38d7e14fd1706aa14 | diff --git a/ludwig/backend/ray.py b/ludwig/backend/ray.py
--- a/ludwig/backend/ray.py
+++ b/ludwig/backend/ray.py
@@ -28,7 +28,7 @@
from ludwig.data.dataframe.dask import DaskEngine
from ludwig.models.predictor import BasePredictor, RemotePredictor
from ludwig.models.trainer import BaseTrainer, RemoteTrainer
-from ludwig.utils.tf_utils import initialize_tensorflow
+from ludwig.utils.tf_utils import initialize_tensorflow, save_weights_to_buffer, load_weights_from_buffer
logger = logging.getLogger(__name__)
@@ -73,12 +73,14 @@ def get_total_resources(bucket):
class RayRemoteModel:
def __init__(self, model):
+ buf = save_weights_to_buffer(model)
self.cls, self.args, state = list(model.__reduce__())
- self.state = ray.put(state)
+ self.state = ray.put(buf)
def load(self):
obj = self.cls(*self.args)
- obj.__setstate__(ray.get(self.state))
+ buf = ray.get(self.state)
+ load_weights_from_buffer(obj, buf)
return obj
@@ -90,13 +92,13 @@ def train(self, *args, **kwargs):
results = super().train(*args, **kwargs)
if results is not None:
model, *stats = results
- results = (model.get_weights(), *stats)
+ results = (save_weights_to_buffer(model), *stats)
return results
def train_online(self, *args, **kwargs):
results = super().train_online(*args, **kwargs)
if results is not None:
- results = results.get_weights()
+ results = save_weights_to_buffer(results)
return results
@@ -114,7 +116,7 @@ def train(self, model, *args, **kwargs):
)
weights, *stats = results[0]
- model.set_weights(weights)
+ load_weights_from_buffer(model, weights)
return (model, *stats)
def train_online(self, model, *args, **kwargs):
@@ -124,7 +126,7 @@ def train_online(self, model, *args, **kwargs):
)
weights = results[0]
- model.set_weights(weights)
+ load_weights_from_buffer(model, weights)
return model
@property
diff --git a/ludwig/data/dataset/parquet.py b/ludwig/data/dataset/parquet.py
--- a/ludwig/data/dataset/parquet.py
+++ b/ludwig/data/dataset/parquet.py
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
+import math
+
import tensorflow as tf
from petastorm import make_batch_reader
@@ -27,6 +29,7 @@
class ParquetDataset(Dataset):
def __init__(self, url, features, training_set_metadata):
self.url = url
+ self.features = [feature[PROC_COLUMN] for feature in features]
self.training_set_metadata = training_set_metadata
with make_batch_reader(self.url) as reader:
@@ -76,7 +79,7 @@ def initialize_batcher(self,
dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
- steps_per_epoch = int(local_samples / batch_size)
+ steps_per_epoch = math.ceil(local_samples / batch_size)
batcher = IterableBatcher(self,
dataset,
diff --git a/ludwig/utils/tf_utils.py b/ludwig/utils/tf_utils.py
--- a/ludwig/utils/tf_utils.py
+++ b/ludwig/utils/tf_utils.py
@@ -14,10 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
+import io
+import os
+import shutil
+import tempfile
import multiprocessing
import warnings
+import zipfile
import tensorflow as tf
+from ludwig.globals import MODEL_WEIGHTS_FILE_NAME
_TF_INIT_PARAMS = None
@@ -139,3 +145,21 @@ def get_available_gpus_cuda_string():
if len(gpus) == 0:
return None
return ','.join(gpus)
+
+
+def save_weights_to_buffer(model):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ weights_path = os.path.join(tmpdir, MODEL_WEIGHTS_FILE_NAME)
+ model.save_weights(weights_path)
+ with tempfile.TemporaryDirectory() as zipdir:
+ shutil.make_archive(os.path.join(zipdir, MODEL_WEIGHTS_FILE_NAME), 'zip', tmpdir)
+ with open(os.path.join(zipdir, f'{MODEL_WEIGHTS_FILE_NAME}.zip'), 'rb') as f:
+ return f.read()
+
+
+def load_weights_from_buffer(model, buf):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ with zipfile.ZipFile(io.BytesIO(buf)) as zip_ref:
+ zip_ref.extractall(tmpdir)
+ weights_path = os.path.join(tmpdir, MODEL_WEIGHTS_FILE_NAME)
+ model.load_weights(weights_path)
| diff --git a/tests/integration_tests/test_hyperopt.py b/tests/integration_tests/test_hyperopt.py
--- a/tests/integration_tests/test_hyperopt.py
+++ b/tests/integration_tests/test_hyperopt.py
@@ -88,6 +88,10 @@
def test_hyperopt_executor(sampler, executor, csv_filename,
validate_output_feature=False,
validation_metric=None):
+ if executor['type'] == 'fiber' and sampler['type'] == 'grid':
+ # This test is very slow and doesn't give us additional converage
+ pytest.skip('Skipping Fiber grid search')
+
input_features = [
text_feature(name="utterance", cell_type="lstm", reduce_output="sum"),
category_feature(vocab_size=2, reduce_input="sum")]
diff --git a/tests/integration_tests/test_ray.py b/tests/integration_tests/test_ray.py
--- a/tests/integration_tests/test_ray.py
+++ b/tests/integration_tests/test_ray.py
@@ -38,8 +38,8 @@
@pytest.fixture
-def ray_start_4_cpus():
- res = ray.init(num_cpus=4)
+def ray_start_2_cpus():
+ res = ray.init(num_cpus=2)
try:
yield res
finally:
@@ -50,7 +50,7 @@ def run_api_experiment(config, data_parquet):
# Sanity check that we get 4 slots over 1 host
kwargs = get_horovod_kwargs()
assert kwargs.get('num_hosts') == 1
- assert kwargs.get('num_slots') == 4
+ assert kwargs.get('num_slots') == 2
# Train on Parquet
dask_backend = RayBackend()
@@ -70,7 +70,7 @@ def run_test_parquet(
'input_features': input_features,
'output_features': output_features,
'combiner': {'type': 'concat', 'fc_size': 14},
- 'training': {'epochs': 2}
+ 'training': {'epochs': 2, 'batch_size': 8}
}
with tempfile.TemporaryDirectory() as tmpdir:
@@ -86,7 +86,7 @@ def run_test_parquet(
@pytest.mark.distributed
-def test_ray_tabular(ray_start_4_cpus):
+def test_ray_tabular(ray_start_2_cpus):
input_features = [
sequence_feature(reduce_output='sum'),
numerical_feature(normalization='zscore'),
| Parquet dataset does not have a `self.features` attribute, which is needed in the IterableBatcher
**Describe the bug**
The `IterableBatcher` loops through all the features of a dataset (see [this line of code](https://github.com/ludwig-ai/ludwig/blob/master/ludwig/data/batcher/iterable.py#L40)) via the `self.features` attribute of the `Dataset` object.
When the dataset is a parquet dataset (a new feature merged to master in [PR#1090](https://github.com/ludwig-ai/ludwig/commit/01d114ba76667dc52d1673d9be0809ee1e58087c)), which is a required format when running ludwig on Ray, the `IterableBatcher` cannot find this `self.features` attribute.
Going through the code of `parquet.py`, I wasn't able to find a `self.features` attribute. There is only a `self.reshape_features`, which does not seem to be compatible with `IterableBatcher`.
**To Reproduce**
See my code on petfinder dataset: https://github.com/zhisbug/ludwig-petfinder/blob/master/petfinder-adoption-prediction/train.py#L11
**Expected behavior**
I guess a fix is needed to make `IterableBatcher` compatible with `ParquetDataset`?
**Environment (please complete the following information):**
- OS: Ubuntu 18
- Python version: 3.6
- Ludwig version: master
**Additional context**
Add any other context about the problem here.
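To make the mismatch concrete, here is a stripped-down sketch of the interface the batcher relies on (attribute and column names are simplified assumptions, not the exact Ludwig internals): every `Dataset` is expected to expose a flat `features` list of processed column names that the batcher can iterate.
```python
# Sketch of the expected interface, heavily simplified.
class MinimalParquetDataset:
    def __init__(self, url, features, training_set_metadata):
        self.url = url
        # the missing piece: a flat list of processed column names
        self.features = [feature["proc_column"] for feature in features]
        self.training_set_metadata = training_set_metadata


def make_batch(dataset, raw_batch):
    # roughly what IterableBatcher does with dataset.features
    return {name: raw_batch[name] for name in dataset.features}
```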
| 2021-03-29T15:06:11 |
|
ludwig-ai/ludwig | 1,198 | ludwig-ai__ludwig-1198 | [
"1197"
] | 4c5441a191c324047277311e54758a0995c0076d | diff --git a/ludwig/models/trainer.py b/ludwig/models/trainer.py
--- a/ludwig/models/trainer.py
+++ b/ludwig/models/trainer.py
@@ -113,6 +113,7 @@ def __init__(
staircase=False,
batch_size=128,
eval_batch_size=0,
+ should_shuffle=True,
shuffle_buffer_size=None,
bucketing_field=None,
validation_field='combined',
@@ -162,6 +163,8 @@ def __init__(
:type batch_size: Integer
:param eval_batch_size: Size of batch to pass to the model for evaluation.
:type eval_batch_size: Integer
+ :param should_shuffle: Shuffle batches during training when true (default: True).
+ :type shuffle_buffer_size: Boolean
:param shuffle_buffer_size: Size of buffer in number of examples to read for shuffling.
:type shuffle_buffer_size: Integer
:param bucketing_field: when batching, buckets datapoints based the
@@ -241,6 +244,7 @@ def __init__(
self.staircase = staircase
self.batch_size = batch_size
self.eval_batch_size = batch_size if eval_batch_size < 1 else eval_batch_size
+ self.should_shuffle = should_shuffle
self.shuffle_buffer_size = shuffle_buffer_size
self.bucketing_field = bucketing_field
self._validation_field = validation_field
@@ -513,6 +517,7 @@ def train(
set_random_seed(self.random_seed)
with training_set.initialize_batcher(
batch_size=self.batch_size,
+ should_shuffle=self.should_shuffle,
shuffle_buffer_size=self.shuffle_buffer_size,
seed=self.random_seed,
horovod=self.horovod,
@@ -801,6 +806,8 @@ def train_online(
):
with dataset.initialize_batcher(
batch_size=self.batch_size,
+ should_shuffle=self.should_shuffle,
+ shuffle_buffer_size=self.shuffle_buffer_size,
horovod=self.horovod
) as batcher:
| Is it possible to disable shuffle?
Is there a way to disable shuffling of data during the processing of mini batches? Let's say I have organized my train data = [0...1000] and my batch size is 256. I would like to force mini batches to process data in the same order without any randomization or shuffling.
batch 0 = [0..255]
batch 1 = [256..511] and so on..
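For anyone looking for the same thing after this PR: a sketch of how the new flag could be used. The key name `should_shuffle` under `training` follows the trainer parameter added in the patch, but treat it as an assumption and check the docs for your version.
```python
# Sketch only: disabling batch shuffling via the new trainer flag.
from ludwig.api import LudwigModel

config = {
    "input_features": [{"name": "x", "type": "numerical"}],
    "output_features": [{"name": "y", "type": "numerical"}],
    "training": {"batch_size": 256, "should_shuffle": False},
}
model = LudwigModel(config)
# batches are then drawn in dataset order: [0..255], [256..511], ...
```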
| 2021-06-08T19:04:49 |
||
ludwig-ai/ludwig | 1,326 | ludwig-ai__ludwig-1326 | [
"1181"
] | 8eab807c6ca71a7d1fdda30c49924e4f68bf3101 | diff --git a/ludwig/features/audio_feature.py b/ludwig/features/audio_feature.py
--- a/ludwig/features/audio_feature.py
+++ b/ludwig/features/audio_feature.py
@@ -188,9 +188,29 @@ def reduce(series):
return merged_stats
merged_stats = df_engine.reduce_objects(audio_stats, reduce)
- merged_stats['mean'] = calculate_mean(merged_stats['sum'], merged_stats['count'])
- merged_stats['var'] = calculate_var(merged_stats['sum'], merged_stats['sum2'], merged_stats['count'])
- return processed_audio, merged_stats
+ merged_stats['mean'] = calculate_mean(merged_stats['sum'],
+ merged_stats['count'])
+ merged_stats['var'] = calculate_var(merged_stats['sum'],
+ merged_stats['sum2'],
+ merged_stats['count'])
+ merged_stats['std'] = np.sqrt(
+ merged_stats['var'] / float(merged_stats['count']))
+ print_statistics = (
+ "{} audio files loaded.\n"
+ "Statistics of audio file lengths:\n"
+ "- mean: {:.4f}\n"
+ "- std: {:.4f}\n"
+ "- max: {:.4f}\n"
+ "- min: {:.4f}\n"
+ "- cropped audio_files: {}\n"
+ "Max length was given as {}s"
+ ).format(
+ merged_stats['count'], merged_stats['mean'],
+ merged_stats['std'], merged_stats['max'],
+ merged_stats['min'], merged_stats['cropped'],
+ audio_file_length_limit_in_s)
+ logger.debug(print_statistics)
+ return processed_audio
@staticmethod
def _transform_to_feature(
@@ -349,7 +369,7 @@ def add_feature_data(
'There are no audio files in the dataset provided.')
if feature[PREPROCESSING]['in_memory']:
- audio_features, audio_stats = AudioFeatureMixin._process_in_memory(
+ audio_features = AudioFeatureMixin._process_in_memory(
input_df[feature[NAME]],
src_path,
audio_feature_dict,
@@ -361,24 +381,6 @@ def add_feature_data(
backend
)
proc_df[proc_column] = audio_features
-
- audio_stats['std'] = np.sqrt(
- audio_stats['var'] / float(audio_stats['count']))
- print_statistics = (
- "{} audio files loaded.\n"
- "Statistics of audio file lengths:\n"
- "- mean: {:.4f}\n"
- "- std: {:.4f}\n"
- "- max: {:.4f}\n"
- "- min: {:.4f}\n"
- "- cropped audio_files: {}\n"
- "Max length was given as {}s"
- ).format(
- audio_stats['count'], audio_stats['mean'],
- audio_stats['std'], audio_stats['max'],
- audio_stats['min'], audio_stats['cropped'],
- audio_file_length_limit_in_s)
- logger.debug(print_statistics)
else:
backend.check_lazy_load_supported(feature)
diff --git a/ludwig/utils/audio_utils.py b/ludwig/utils/audio_utils.py
--- a/ludwig/utils/audio_utils.py
+++ b/ludwig/utils/audio_utils.py
@@ -237,9 +237,8 @@ def calculate_incr_mean(count, mean, length):
def calculate_var(sum1, sum2, count):
- # todo: revert from 'max(1, count -1)' back to 'count - 1' in denominator
- # when GH Issue #1181 is addressed
- return (sum2 - ((sum1 * sum1) / float(count))) / float(max(1, count - 1))
+ return (sum2 - ((sum1 * sum1) / float(count))) / float(count - 1) \
+ if count > 1 else 0.0
def calculate_mean(sum1, count):
| ZeroDivisionError in audio feature prediction with only a single record
**Describe the bug**
When generating a model prediction involving an audio feature where the dataset contains only one record, a `ZeroDivisionError` exception is raised in the `ludwig.utils.audio_utils.calculate_var()` function. The error occurs because the variable `count = 1`, which results in a zero in the denominator of this expression:
```
return (sum2 - ((sum1 * sum1) / float(count))) / float(count - 1)
```
**To Reproduce**
Steps to reproduce the behavior:
Run unit test `test_server.py`
**Expected behavior**
Successful prediction with only a single record.
**Log file**
Here is log and error messages:
```
PASSED [ 66%]FAILED [100%]Failed to run predict: float division by zero
Traceback (most recent call last):
File "/opt/project/ludwig/serve.py", line 92, in predict
dataset=[entry], data_format=dict
File "/opt/project/ludwig/api.py", line 683, in predict
backend=self.backend,
File "/opt/project/ludwig/data/preprocessing.py", line 1728, in preprocess_for_prediction
backend
File "/opt/project/ludwig/data/preprocessing.py", line 162, in preprocess_for_prediction
backend=backend
File "/opt/project/ludwig/data/preprocessing.py", line 1080, in build_dataset
skip_save_processed_input
File "/opt/project/ludwig/data/preprocessing.py", line 1225, in build_data
skip_save_processed_input
File "/opt/project/ludwig/features/audio_feature.py", line 343, in add_feature_data
backend
File "/opt/project/ludwig/features/audio_feature.py", line 174, in _process_in_memory
merged_stats['var'] = calculate_var(merged_stats['sum'], merged_stats['sum2'], merged_stats['count'])
File "/opt/project/ludwig/utils/audio_utils.py", line 240, in calculate_var
return (sum2 - ((sum1 * sum1) / float(count))) / float(count - 1)
ZeroDivisionError: float division by zero
tests/integration_tests/test_server.py:193 (test_server_integration_with_audio[True])
500 != 200
Expected :200
Actual :500
<Click to see difference>
single_record = True, csv_filename = 'EA8A7B56DE.csv'
@pytest.mark.parametrize('single_record', [False, True])
def test_server_integration_with_audio(single_record, csv_filename):
# Audio Inputs
audio_dest_folder = os.path.join(os.getcwd(), 'generated_audio')
# Resnet encoder
input_features = [
audio_feature(
folder=audio_dest_folder,
),
text_feature(encoder='embed', min_len=1),
numerical_feature(normalization='zscore')
]
output_features = [
category_feature(vocab_size=2),
numerical_feature()
]
rel_path = generate_data(input_features, output_features, csv_filename)
model, output_dir = train_model(input_features, output_features,
data_csv=rel_path)
app = server(model)
client = TestClient(app)
response = client.get('/')
assert response.status_code == 200
response = client.post('/predict')
# expect the HTTP 400 error code for this situation
assert response.status_code == 400
assert response.json() == ALL_FEATURES_PRESENT_ERROR
data_df = read_csv(rel_path)
if single_record:
# Single record prediction
first_entry = data_df.T.to_dict()[0]
data, files = convert_to_form(first_entry)
server_response = client.post('/predict', data=data, files=files)
> assert server_response.status_code == 200
E assert 500 == 200
../../tests/integration_tests/test_server.py:233: AssertionError
```
**Environment (please complete the following information):**
- OS: [e.g. iOS] Ludwig Docker container
- Version [e.g. 22]
- Python version: 3.6.9
- Ludwig version: 0.4-dev0
**Additional context**
As short-term work-around modified denominator to be `max(1, count-1)`.
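The patch above guards the Bessel-corrected variance for the single-sample case; a worked sketch of the arithmetic and the guarded version:
```python
# With a single audio file, count == 1, so the unbiased-variance denominator
# (count - 1) is 0 and the original expression divides by zero.
def calculate_var(sum1, sum2, count):
    if count > 1:
        return (sum2 - (sum1 * sum1) / float(count)) / float(count - 1)
    return 0.0  # a single observation gets zero variance instead of crashing


assert calculate_var(sum1=3.0, sum2=9.0, count=1) == 0.0
```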
| 2021-09-26T02:10:52 |
||
ludwig-ai/ludwig | 1,327 | ludwig-ai__ludwig-1327 | [
"1323"
] | 84ea03c6f80ef306a5d9a3f0b0d381a244da0b41 | diff --git a/ludwig/encoders/sequence_encoders.py b/ludwig/encoders/sequence_encoders.py
--- a/ludwig/encoders/sequence_encoders.py
+++ b/ludwig/encoders/sequence_encoders.py
@@ -1843,7 +1843,7 @@ def __init__(
if self.reduce_output is not None:
logger.debug(' FCStack')
self.fc_stack = FCStack(
- self.conv1d_stack.output_shape[0],
+ self.conv1d_stack.output_shape[-1],
layers=fc_layers,
num_layers=num_fc_layers,
default_fc_size=fc_size,
diff --git a/ludwig/features/timeseries_feature.py b/ludwig/features/timeseries_feature.py
--- a/ludwig/features/timeseries_feature.py
+++ b/ludwig/features/timeseries_feature.py
@@ -155,15 +155,20 @@ class TimeseriesInputFeature(TimeseriesFeatureMixin, SequenceInputFeature):
max_sequence_length = None
def __init__(self, feature, encoder_obj=None):
+ # add required sequence encoder parameters for time series
+ feature['embedding_size'] = 1
+ feature['should_embed'] = False
+
+ # initialize encoder for time series
super().__init__(feature, encoder_obj=encoder_obj)
- def call(self, inputs, training=None, mask=None):
- assert isinstance(inputs, tf.Tensor)
- assert inputs.dtype == tf.float16 or inputs.dtype == tf.float32 or \
- inputs.dtype == tf.float64
+ def forward(self, inputs, training=None, mask=None):
+ assert isinstance(inputs, torch.Tensor)
+ assert inputs.dtype == torch.float16 or inputs.dtype == torch.float32 \
+ or inputs.dtype == torch.float64
assert len(inputs.shape) == 2
- inputs_exp = tf.cast(inputs, dtype=tf.float32)
+ inputs_exp = inputs.type(torch.float32)
encoder_output = self.encoder_obj(
inputs_exp, training=training, mask=mask
)
@@ -183,8 +188,6 @@ def update_config_with_metadata(
):
input_feature['max_sequence_length'] = feature_metadata[
'max_timeseries_length']
- input_feature['embedding_size'] = 1
- input_feature['should_embed'] = False
@staticmethod
def populate_defaults(input_feature):
| diff --git a/tests/integration_tests/test_timeseries_feature.py b/tests/integration_tests/test_timeseries_feature.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/test_timeseries_feature.py
@@ -0,0 +1,49 @@
+import pytest
+
+import torch
+
+from ludwig.features.timeseries_feature import TimeseriesInputFeature
+from tests.integration_tests.utils import timeseries_feature
+
+BATCH_SIZE = 2
+SEQ_SIZE = 10
+DEFAULT_FC_SIZE = 4
+
+
[email protected](
+ 'enc_encoder',
+ [
+ 'stacked_cnn', 'parallel_cnn', 'stacked_parallel_cnn', 'rnn', 'cnnrnn',
+ 'passthrough'
+ ]
+)
+def test_timeseries_feature(enc_encoder):
+ # synthetic time series tensor
+ timeseries_tensor = torch.randn([BATCH_SIZE, SEQ_SIZE],
+ dtype=torch.float32)
+
+ # generate feature config
+ timeseries_feature_config = timeseries_feature(
+ encoder=enc_encoder,
+ max_len=SEQ_SIZE,
+ fc_layers=[{'fc_size': DEFAULT_FC_SIZE}],
+ # simulated parameters determined by pre-processing
+ max_sequence_length=SEQ_SIZE,
+ )
+
+ # instantiate input feature object
+ timeseries_input_feature = TimeseriesInputFeature(timeseries_feature_config)
+
+ # pass synthetic tensor through input feature
+ encoder_output = timeseries_input_feature(timeseries_tensor)
+
+ # confirm correctness of the encoder output
+ assert isinstance(encoder_output, dict)
+ assert 'encoder_output' in encoder_output
+ assert isinstance(encoder_output['encoder_output'], torch.Tensor)
+ if enc_encoder == 'passthrough':
+ assert encoder_output['encoder_output'].shape \
+ == (BATCH_SIZE, SEQ_SIZE, 1)
+ else:
+ assert encoder_output['encoder_output'].shape \
+ == (BATCH_SIZE, DEFAULT_FC_SIZE)
| Migrate time series features to PyTorch
| 2021-09-26T21:40:37 |
|
ludwig-ai/ludwig | 1,344 | ludwig-ai__ludwig-1344 | [
"1341"
] | 9051edcd60f46fc340c979f50494b7be2f2f1143 | diff --git a/ludwig/features/date_feature.py b/ludwig/features/date_feature.py
--- a/ludwig/features/date_feature.py
+++ b/ludwig/features/date_feature.py
@@ -44,7 +44,7 @@ class DateFeatureMixin:
'missing_value_strategy': {'type': 'string', 'enum': MISSING_VALUE_STRATEGY_OPTIONS},
'fill_value': {'type': 'string'},
'computed_fill_value': {'type': 'string'},
- 'datetime_format': {'type': 'string'},
+ 'datetime_format': {'type': ['string', 'null']},
}
@staticmethod
diff --git a/ludwig/features/sequence_feature.py b/ludwig/features/sequence_feature.py
--- a/ludwig/features/sequence_feature.py
+++ b/ludwig/features/sequence_feature.py
@@ -75,6 +75,7 @@ class SequenceFeatureMixin:
'padding': {'type': 'string', 'enum': ['right', 'left']},
'tokenizer': {'type': 'string', 'enum': sorted(list(tokenizer_registry.keys()))},
'lowercase': {'type': 'boolean'},
+ 'vocab_file': {'type': ['string', 'null']},
'missing_value_strategy': {'type': 'string', 'enum': MISSING_VALUE_STRATEGY_OPTIONS},
'fill_value': {'type': 'string'},
'computed_fill_value': {'type': 'string'},
diff --git a/ludwig/features/text_feature.py b/ludwig/features/text_feature.py
--- a/ludwig/features/text_feature.py
+++ b/ludwig/features/text_feature.py
@@ -67,12 +67,12 @@ class TextFeatureMixin:
preprocessing_schema = {
'char_tokenizer': {'type': 'string', 'enum': sorted(list(tokenizer_registry.keys()))},
- 'char_vocab_file': {'type': 'string'},
+ 'char_vocab_file': {'type': ['string', 'null']},
'char_sequence_length_limit': {'type': 'integer', 'minimum': 0},
'char_most_common': {'type': 'integer', 'minimum': 0},
'word_tokenizer': {'type': 'string', 'enum': sorted(list(tokenizer_registry.keys()))},
- 'pretrained_model_name_or_path': {'type': 'string'},
- 'word_vocab_file': {'type': 'string'},
+ 'pretrained_model_name_or_path': {'type': ['string', 'null']},
+ 'word_vocab_file': {'type': ['string', 'null']},
'word_sequence_length_limit': {'type': 'integer', 'minimum': 0},
'word_most_common': {'type': 'integer', 'minimum': 0},
'padding_symbol': {'type': 'string'},
| diff --git a/tests/ludwig/utils/test_schema.py b/tests/ludwig/utils/test_schema.py
--- a/tests/ludwig/utils/test_schema.py
+++ b/tests/ludwig/utils/test_schema.py
@@ -17,6 +17,20 @@
import pytest
from jsonschema.exceptions import ValidationError
+
+from ludwig.features.audio_feature import AudioFeatureMixin
+from ludwig.features.bag_feature import BagFeatureMixin
+from ludwig.features.binary_feature import BinaryFeatureMixin
+from ludwig.features.category_feature import CategoryFeatureMixin
+from ludwig.features.date_feature import DateFeatureMixin
+from ludwig.features.h3_feature import H3FeatureMixin
+from ludwig.features.image_feature import ImageFeatureMixin
+from ludwig.features.numerical_feature import NumericalFeatureMixin
+from ludwig.features.sequence_feature import SequenceFeatureMixin
+from ludwig.features.set_feature import SetFeatureMixin
+from ludwig.features.text_feature import TextFeatureMixin
+from ludwig.features.timeseries_feature import TimeseriesFeatureMixin
+from ludwig.features.vector_feature import VectorFeatureMixin
from ludwig.utils.defaults import merge_with_defaults
from ludwig.utils.schema import validate_config, OUTPUT_FEATURE_TYPES
@@ -249,3 +263,36 @@ def test_config_fill_values():
}
with pytest.raises(ValidationError):
validate_config(config)
+
+
+def test_validate_with_preprocessing_defaults():
+ config = {
+ "input_features": [
+ audio_feature('/tmp/destination_folder',
+ preprocessing=AudioFeatureMixin.preprocessing_defaults),
+ bag_feature(preprocessing=BagFeatureMixin.preprocessing_defaults),
+ binary_feature(preprocessing=BinaryFeatureMixin.preprocessing_defaults),
+ category_feature(preprocessing=CategoryFeatureMixin.preprocessing_defaults),
+ date_feature(preprocessing=DateFeatureMixin.preprocessing_defaults),
+ h3_feature(preprocessing=H3FeatureMixin.preprocessing_defaults),
+ image_feature('/tmp/destination_folder',
+ preprocessing=ImageFeatureMixin.preprocessing_defaults),
+ numerical_feature(preprocessing=NumericalFeatureMixin.preprocessing_defaults),
+ sequence_feature(preprocessing=SequenceFeatureMixin.preprocessing_defaults),
+ set_feature(preprocessing=SetFeatureMixin.preprocessing_defaults),
+ text_feature(preprocessing=TextFeatureMixin.preprocessing_defaults),
+ timeseries_feature(preprocessing=TimeseriesFeatureMixin.preprocessing_defaults),
+ vector_feature(preprocessing=VectorFeatureMixin.preprocessing_defaults),
+ ],
+ "output_features": [{"name": "target", "type": "category"}],
+ "training": {
+ "decay": True,
+ "learning_rate": 0.001,
+ "validation_field": "target",
+ "validation_metric": "accuracy"
+ },
+ }
+
+ validate_config(config)
+ config = merge_with_defaults(config)
+ validate_config(config)
| ValidationError: None is not of type 'string' while loading a trained model
**Describe the bug**
```sh
ValidationError: None is not of type 'string'
Failed validating 'type' in schema['properties']['input_features']['items']['allOf'][23]['then']['properties']['preprocessing']['properties']['char_vocab_file']:
{'type': 'string'}
On instance['input_features'][0]['preprocessing']['char_vocab_file']:
None
```
**To Reproduce**
Steps to reproduce the behavior:
1. Train the model with the following config
```python
from ludwig.api import LudwigModel
config = {
"input_features": [{
"name": "text",
"type": "text",
"level": 'word',
"encoder": "rnn",
"pretrained_embeddings": 'glove/glove.6B.300d.txt',
"embedding_size": 300,
"preprocessing": { "word_vocab_file": 'glove/glove.6B.300d.txt' }
}],
"output_features": [{ "name": "target", "type": "category" }],
"training": {
"decay": True,
"learning_rate": 0.001,
"validation_field": "target",
"validation_metric": "accuracy"
},
}
dataset_file_path = "../data/train.csv"
model = LudwigModel(config)
training_statistics, preprocessed_data, output_directory = model.train(dataset=dataset_file_path)
```
2. Load it with the ludwig api
```python
model = LudwigModel.load(os.path.join(output_directory, "model"))
```
**Expected behavior**
The model should be loaded without an error.
**Screenshots**
<img width="1351" alt="image" src="https://user-images.githubusercontent.com/4970420/135705934-46a8bc3c-4e5e-4b2b-a41e-5dfe94162810.png">
**Environment (please complete the following information):**
- OS: [Ubuntu]
- Version [18.04]
- Python version: 3.9
- Ludwig version: 0.4
**Additional context**
Trying to use Ludwig to solve a kaggle competition to get a taste of the entire workflow.
Here is the link to the dataset.
https://www.kaggle.com/c/nlp-getting-started/data
Thanks for the help!
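A minimal reproduction of the schema failure outside Ludwig, using the `jsonschema` package directly: the saved config stores `null` for the unset vocab-file paths, and a schema entry of `{'type': 'string'}` rejects `None`, while the fixed entry `{'type': ['string', 'null']}` accepts it.
```python
from jsonschema import ValidationError, validate

value = None  # e.g. char_vocab_file saved as null in the trained model's config

try:
    validate(value, {"type": "string"})        # old schema entry
except ValidationError as err:
    print(err.message)                         # None is not of type 'string'

validate(value, {"type": ["string", "null"]})  # fixed schema entry: passes
```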
| 2021-10-02T20:03:20 |
|
ludwig-ai/ludwig | 1,517 | ludwig-ai__ludwig-1517 | [
"1511"
] | c346604f82c61b4e4faeef599cf71032147611a0 | diff --git a/ludwig/utils/eval_utils.py b/ludwig/utils/eval_utils.py
--- a/ludwig/utils/eval_utils.py
+++ b/ludwig/utils/eval_utils.py
@@ -262,15 +262,15 @@ def stats(self):
def roc_curve(conditions, prediction_scores, pos_label=None,
sample_weight=None):
- return metrics.roc_curve(conditions, prediction_scores, pos_label,
- sample_weight)
+ return metrics.roc_curve(conditions, prediction_scores, pos_label=pos_label,
+ sample_weight=sample_weight)
def roc_auc_score(conditions, prediction_scores, average='micro',
sample_weight=None):
try:
- return metrics.roc_auc_score(conditions, prediction_scores, average,
- sample_weight)
+ return metrics.roc_auc_score(conditions, prediction_scores, average=average,
+ sample_weight=sample_weight)
except ValueError as ve:
logger.info(ve)
@@ -278,7 +278,7 @@ def roc_auc_score(conditions, prediction_scores, average='micro',
def precision_recall_curve(conditions, prediction_scores, pos_label=None,
sample_weight=None):
return metrics.precision_recall_curve(conditions, prediction_scores,
- pos_label, sample_weight)
+ pos_label=pos_label, sample_weight=sample_weight)
def average_precision_score(conditions, prediction_scores, average='micro',
@@ -287,29 +287,3 @@ def average_precision_score(conditions, prediction_scores, average='micro',
return metrics.average_precision_score(conditions, prediction_scores,
average=average,
sample_weight=sample_weight)
-
-# if __name__ == '__main__':
-# parser = argparse.ArgumentParser(
-# description='This script trains and tests a model.')
-# parser.add_argument('gold_standard', help='file containing gold standars')
-# parser.add_argument(PREDICTIONS, help='file containing predictions')
-# parser.add_argument('output_fp', help='output file')
-# args = parser.parse_args()
-#
-# hdf5_data = h5py.File(args.gold_standard, 'r')
-# split = hdf5_data[SPLIT].value
-# column = hdf5_data['macros'].value
-# hdf5_data.close()
-# conditions = column[split == 2] # ground truth
-#
-# predictions = np.load(args.predictions)
-#
-# confusion_matrix = ConfusionMatrix(predictions, conditions)
-#
-# results = load_json(args.output_fp)
-# results['confusion_matrix_stats'] = {
-# 'confusion_matrix': confusion_matrix.cm.tolist(),
-# 'overall_stats': confusion_matrix.stats(),
-# 'per_class_stats': confusion_matrix.per_class_stats()
-# }
-# save_json(args.output_fp, results)
diff --git a/ludwig/utils/strings_utils.py b/ludwig/utils/strings_utils.py
--- a/ludwig/utils/strings_utils.py
+++ b/ludwig/utils/strings_utils.py
@@ -37,8 +37,8 @@
COMMA_REGEX = re.compile(r'\s*,\s*')
UNDERSCORE_REGEX = re.compile(r'\s*_\s*')
-BOOL_TRUE_STRS = {'yes', 'y', 'true', 't', '1'}
-BOOL_FALSE_STRS = {'no', 'n', 'false', 'f', '0'}
+BOOL_TRUE_STRS = {'yes', 'y', 'true', 't', '1', '1.0'}
+BOOL_FALSE_STRS = {'no', 'n', 'false', 'f', '0', '0.0'}
# Update the following if BOOL_TRUE_STRS or BOOL_FALSE_STRS changes
MAX_DISTINCT_BOOL_PERMUTATIONS = 70
| diff --git a/tests/ludwig/utils/test_strings_utils.py b/tests/ludwig/utils/test_strings_utils.py
--- a/tests/ludwig/utils/test_strings_utils.py
+++ b/tests/ludwig/utils/test_strings_utils.py
@@ -31,5 +31,6 @@ def test_are_conventional_bools():
assert strings_utils.are_conventional_bools(['True', 'Fales']) == False
assert strings_utils.are_conventional_bools(['0', '1']) == True
assert strings_utils.are_conventional_bools(['0', '2']) == False
+ assert strings_utils.are_conventional_bools(['1.0', '0.0']) == True
assert strings_utils.are_conventional_bools(['high', 'low']) == False
assert strings_utils.are_conventional_bools(['human', 'bot']) == False
| [automl] Binary column type not detected for floats
For example, the titanic dataset has values like `1.0` and `0.0`. These can correctly be interpreted as binary values `1` and `0` respectively, but the auto detect system is treating this as a categorical feature.
@tgaddair I think that:
`output_features:
-
name: Survived
type: binary`
and
`output_features:
-
name: Survived
type: category`
are interpreted similarly; if you want numeric output you of course need:
`output_features:
-
name: Survived
type: numeric`
but for me the question is **how to configure a sigmoid layer as output to get a probability value result?** Equivalent to:
`pred = model.predict_proba(X_test)`
Good point @PeterPirog . If you set the output to be binary, you also get the raw probabilities together with the predictions (which are basically just a transformation of probabilities > threshold, and you can set the threshold in the output feature too if you want, by default is 0.5).
Does that work for you?
We could also make it so that users can define an activation function before the final output of the numerical feature, but the issue there would be that even if you do, say, a sigmoid to squash between 0 and 1, you'll still train with MSE loss (which may be fine, there are a few papers where it is shown that performance are comparable and in few cases even better than using cross entropy).
@w4nderlust, I'm very glad to use your framework. Can you give some suggestions on how to set a metric in ludwig for unbalanced data, since accuracy is a bad option for it? I noticed an error for ROC AUC:
```
return metrics.roc_curve(conditions, prediction_scores, pos_label,
TypeError: roc_curve() takes 2 positional arguments but 4 were given
```
Configuration is:
```
input_features:
-
name: Pclass
type: category
-
name: Name
type: text
level: char
encoder: parallel_cnn
-
name: Sex
type: category
-
name: Age
type: numerical
preprocessing:
missing_value_strategy: fill_with_mean
-
name: SibSp
type: numerical
-
name: Parch
type: numerical
-
name: Fare
type: numerical
preprocessing:
missing_value_strategy: fill_with_mean
-
name: Embarked
type: category
output_features:
-
name: Survived
type: binary
```
| 2021-11-24T16:43:57 |
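Regarding the `roc_curve` TypeError in the conversation above: newer scikit-learn releases made everything after `y_score` keyword-only, which is why positional `pos_label`/`sample_weight` now fail; passing them by keyword, as the patch does, works on both old and new versions. A small standalone example:
```python
from sklearn import metrics

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]

# metrics.roc_curve(y_true, y_score, 1, None) raises the TypeError above on
# recent scikit-learn; keyword arguments are required for these parameters.
fpr, tpr, thresholds = metrics.roc_curve(
    y_true, y_score, pos_label=1, sample_weight=None
)
```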
ludwig-ai/ludwig | 1,618 | ludwig-ai__ludwig-1618 | [
"1617"
] | 2c31bc090df22f0d190eca98d395f6f4d3640960 | diff --git a/ludwig/combiners/combiners.py b/ludwig/combiners/combiners.py
--- a/ludwig/combiners/combiners.py
+++ b/ludwig/combiners/combiners.py
@@ -79,7 +79,7 @@ class Meta:
class ConcatCombiner(tf.keras.Model):
def __init__(
self,
- input_features: Optional[List] = None,
+ input_features: Optional[Dict] = None,
config: ConcatCombinerConfig = None,
**kwargs
):
@@ -507,7 +507,7 @@ class Meta:
class TransformerCombiner(tf.keras.Model):
def __init__(
self,
- input_features: Optional[List] = None,
+ input_features: Optional[Dict] = None,
config: TransformerCombinerConfig = None,
**kwargs
):
@@ -638,7 +638,7 @@ class Meta:
class TabTransformerCombiner(tf.keras.Model):
def __init__(
self,
- input_features: Optional[List] = None,
+ input_features: Optional[Dict] = None,
config: TabTransformerCombinerConfig = None,
**kwargs
):
@@ -655,8 +655,9 @@ def __init__(
self.embed_input_feature_name = config.embed_input_feature_name
if self.embed_input_feature_name:
- vocab = [i_f for i_f in input_features
- if i_f[TYPE] != NUMERICAL or i_f[TYPE] != BINARY]
+ vocab = [i_f
+ for i_f in input_features
+ if input_features[i_f].type != NUMERICAL or input_features[i_f].type != BINARY]
if self.embed_input_feature_name == 'add':
self.embed_i_f_name_layer = Embed(vocab, config.hidden_size,
force_embedding_size=True)
@@ -688,9 +689,9 @@ def __init__(
logger.debug(' Projectors')
self.projectors = [Dense(projector_size) for i_f in input_features
- if i_f[TYPE] != NUMERICAL and i_f[TYPE] != BINARY]
- self.skip_features = [i_f[NAME] for i_f in input_features
- if i_f[TYPE] == NUMERICAL or i_f[TYPE] == BINARY]
+ if input_features[i_f].type != NUMERICAL and input_features[i_f].type != BINARY]
+ self.skip_features = [i_f for i_f in input_features
+ if input_features[i_f].type == NUMERICAL or input_features[i_f].type == BINARY]
logger.debug(' TransformerStack')
self.transformer_stack = TransformerStack(
| diff --git a/tests/integration_tests/test_combiners.py b/tests/integration_tests/test_combiners.py
--- a/tests/integration_tests/test_combiners.py
+++ b/tests/integration_tests/test_combiners.py
@@ -20,6 +20,7 @@
TransformerCombinerConfig,
sequence_encoder_registry,
)
+from ludwig.models.ecd import build_inputs
from ludwig.utils.schema_utils import load_config
logger = logging.getLogger(__name__)
@@ -382,12 +383,12 @@ def test_tabtransformer_combiner(encoder_outputs):
input_features_def = [
{'name': 'feature_1', 'type': 'numerical'},
- {'name': 'feature_2', 'type': 'category'}
+ {'name': 'feature_2', 'type': 'category', 'vocab': ['a', 'b', 'c']}
]
# setup combiner to test
combiner = TabTransformerCombiner(
- input_features=input_features_def,
+ input_features=build_inputs(input_features_def),
config=load_config(TabTransformerCombinerConfig)
)
@@ -399,7 +400,7 @@ def test_tabtransformer_combiner(encoder_outputs):
# setup combiner to test
combiner = TabTransformerCombiner(
- input_features=input_features_def,
+ input_features=build_inputs(input_features_def),
config=load_config(
TabTransformerCombinerConfig,
embed_input_feature_name=56
@@ -414,7 +415,7 @@ def test_tabtransformer_combiner(encoder_outputs):
# setup combiner to test
combiner = TabTransformerCombiner(
- input_features=input_features_def,
+ input_features=build_inputs(input_features_def),
config=load_config(
TabTransformerCombinerConfig,
embed_input_feature_name='add'
| [tf-legacy] input_features type mismatch in combiners
**Describe the bug**
On the `tf-legacy` branch, combiners expect `input_features` to be a list, but `build_inputs()` outputs an `OrderedDict`. In particular, this causes the `TabTransformerCombiner` to break.
**To Reproduce**
Attempt to train a model using the `titanic` dataset. Sample config:
```
input_features:
- name: Pclass
type: category
column: Pclass
- name: Sex
type: category
column: Sex
- name: Age
type: numerical
column: Age
- name: SibSp
type: numerical
column: SibSp
- name: Parch
type: numerical
column: Parch
- name: Ticket
type: category
column: Ticket
- name: Fare
type: numerical
column: Fare
- name: Cabin
type: category
column: Cabin
- name: Embarked
type: category
column: Embarked
output_features:
- name: Survived
type: category
column: Survived
combiner:
type: tabtransformer
num_layers: 1
hidden_size: 256
num_heads: 8
transformer_fc_size: 256
dropout: 0.1
num_fc_layers: 0
fc_size: 256
use_bias: true
weights_initializer: glorot_uniform
bias_initializer: zeros
fc_activation: relu
fc_dropout: 0
fc_residual: false
reduce_output: concat
```
**Expected behavior**
Training succeeds.
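A stripped-down illustration of the mismatch (names simplified; `SimpleNamespace` stands in for the built feature objects): iterating a dict of input features yields the feature *names*, so the per-feature type has to be read from the feature object rather than by subscripting the loop variable.
```python
from types import SimpleNamespace

# what the combiner used to assume: a list of feature-config dicts
features_as_list = [{"name": "Age", "type": "numerical"}]

# what build_inputs() actually returns: a dict of feature objects keyed by name
features_as_dict = {"Age": SimpleNamespace(type="numerical")}

# old-style access (i_f["type"]) breaks because i_f is now a key string;
# the fix reads the type off the feature object instead:
skip_features = [
    name for name in features_as_dict
    if features_as_dict[name].type in ("numerical", "binary")
]
print(skip_features)  # ['Age']
```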
| 2021-12-21T19:42:23 |
|
ludwig-ai/ludwig | 1,650 | ludwig-ai__ludwig-1650 | [
"1621"
] | ceabb806d7b333fcfc49705e2e627f2562b9df5b | diff --git a/ludwig/utils/defaults.py b/ludwig/utils/defaults.py
--- a/ludwig/utils/defaults.py
+++ b/ludwig/utils/defaults.py
@@ -76,10 +76,10 @@
}
default_optimizer_params_registry = {
- "sgd": {"lr": 0.001},
- "stochastic_gradient_descent": {"lr": 0.001},
- "gd": {"lr": 0.001},
- "gradient_descent": {"lr": 0.001},
+ "sgd": {},
+ "stochastic_gradient_descent": {},
+ "gd": {},
+ "gradient_descent": {},
"adam": {
"betas": (0.9, 0.999),
# 'beta_1': 0.9,
@@ -247,6 +247,7 @@ def merge_with_defaults(config):
# ===== Training Optimizer =====
optimizer = config[TRAINING]["optimizer"]
+ set_default_value(optimizer, "lr", config[TRAINING]["learning_rate"])
default_optimizer_params = get_default_optimizer_params(optimizer[TYPE])
for param in default_optimizer_params:
set_default_value(optimizer, param, default_optimizer_params[param])
| Inconsistent learning rates shown in output when specifying learning rate in config
When specifying the learning_rate parameter in the training section of the config, `description.json` shows a different default learning rate in the optimizer section:
config.yaml:
<img width="231" alt="Screen Shot 2021-12-21 at 4 46 42 PM" src="https://user-images.githubusercontent.com/687280/147016130-2856cfda-c399-4fc8-b79b-2fe96003b559.png">
description.json:
<img width="698" alt="Screen Shot 2021-12-21 at 4 47 04 PM" src="https://user-images.githubusercontent.com/687280/147016222-60e44fd7-fd92-474d-b73d-17c5c62d5954.png">
Expected behavior is that there is one learning rate, and if it is specified in the config it should match that.
| Example config for repro (titanic example):
```
input_features:
-
name: Pclass
type: category
-
name: Sex
type: category
-
name: Age
type: numerical
preprocessing:
missing_value_strategy: fill_with_mean
-
name: SibSp
type: numerical
-
name: Parch
type: numerical
-
name: Fare
type: numerical
preprocessing:
missing_value_strategy: fill_with_mean
-
name: Embarked
type: category
output_features:
-
name: Survived
type: binary
norm: null
class_weights: 1
training:
epochs: 100
learning_rate: 0.05
optimizer:
type: sgd
decay: false
regularization_lambda: 0
```
Curious about this as well. I think this traces to `merge_with_defaults` : As an example, in the case of `sgd`, `merge_with_defaults` will check `type` and load the defaults (including `lr`) specified in `default_optimizer_params_registry` regardless of how `learning_rate` is set: https://github.com/ludwig-ai/ludwig/blob/cfa01f07568d545167f36a2ef171a1d6ddb7dab5/ludwig/utils/defaults.py#L244
I believe the reason is that in TF the lr was not an init parameter of the optimizer but a parameter of the step function. As we have external mechanisms outside of the optimizer to set the lr (warmup and decay) we need to change this and reconcile them.
Could this be one of the culprits for [training differences between Torch and Tensorflow](https://github.com/ludwig-ai/ludwig/issues/1613)?
EDIT: I just saw your comment in Slack about how this doesn't seem to affect training. Nevermind! | 2022-01-06T00:44:04 |
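A sketch of the merge order after the fix (the helper below is a stand-in with assumed set-if-absent semantics, mirroring Ludwig's `set_default_value`): the user's `training.learning_rate` is copied into the optimizer params before the per-optimizer defaults are merged, and the `sgd` registry entry no longer carries its own `lr`, so only one learning rate survives.
```python
def set_default_value(d, key, value):  # stand-in for Ludwig's helper
    d.setdefault(key, value)


training = {"learning_rate": 0.05, "optimizer": {"type": "sgd"}}
optimizer = training["optimizer"]

set_default_value(optimizer, "lr", training["learning_rate"])  # new step in the fix
sgd_defaults = {}  # after the fix, the sgd entry in the defaults registry is empty
for param, default in sgd_defaults.items():
    set_default_value(optimizer, param, default)

print(optimizer)  # {'type': 'sgd', 'lr': 0.05} -- matches the config above
```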
|
ludwig-ai/ludwig | 1,702 | ludwig-ai__ludwig-1702 | [
"1701"
] | 65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080 | diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py
--- a/ludwig/features/base_feature.py
+++ b/ludwig/features/base_feature.py
@@ -217,6 +217,7 @@ def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, "Ou
logger.debug(" FCStack")
self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)
+ feature["input_size"] = self.input_size # needed for future overrides
self.fc_stack = FCStack(
first_layer_input_size=self.input_size,
| Shape mismatch when introducing multiple levels of dependencies
**Describe the bug**
When introducing multiple levels of dependencies, the shape of the _concatenated hidden states_ does not match the _input size for the dense layer of the output feature_.
In my case, the text output feature `qty_frac` depends on text output feature `summary`, and numerical output feature `qty` in turn depends on `qty_frac`.
I get the following error when running `ludwig train`:
```python-traceback
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**To Reproduce**
Minimal, reproducible example using bash and docker as only dependencies:
```bash
#!/usr/bin/env bash
FEATURE_LIST=$(
docker run -i mikefarah/yq -o json -I 0 e '.' - <<EOF
- name: document
type: text
- name: summary
type: text
- name: qty_frac
type: text
- name: qty
type: numerical
EOF
)
mkdir /tmp/ludwig-debug
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
synthesize_dataset \
--features $FEATURE_LIST \
--dataset_size 10 \
--output_path /workdir/synthetic_data.csv
cat <<EOF >/tmp/ludwig-debug/config.yml
input_features:
- name: document
type: text
level: word
output_features:
- name: summary
type: text
level: word
decoder: generator
- name: qty_frac
type: text
level: word
decoder: generator
dependencies:
- summary
- name: qty
type: numerical
dependencies:
- qty_frac
EOF
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
train \
--dataset /workdir/synthetic_data.csv \
--config_file /workdir/config.yml \
--output_directory /workdir/results
```
**Expected behavior**
Training starts without error.
**Screenshots**
Excerpt from the traceback:
```python-traceback
File "/usr/local/lib/python3.7/site-packages/ludwig/features/numerical_feature.py", line 269, in logits
return self.decoder_obj(hidden)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/decoders/generic_decoders.py", line 58, in forward
return self.dense(inputs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 212, in forward
output = torch.squeeze(self.dense(input), dim=-1)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
File "/usr/local/lib/python3.7/site-packages/torch/nn/functional.py", line 1848, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**Environment:**
See reproducible example, run in environment with:
- bash: `GNU bash, version 5.0.17(1)-release (x86_64-pc-linux-gnu)`
- docker: `Docker version 20.10.11+azure-3, build dea9396e184290f638ea873c76db7c80efd5a1d2`
The `ludwigai/ludwig:nightly` Docker image was built from main at 89d18365c41c4ded68edd2095349ce4a6caf5d18.
| For reference, here's the [related conversation on Slack](https://ludwig-ai.slack.com/archives/C01PN6M2RSM/p1642806615331569).
After debugging, I found out that the `input_size` field of `TextOutputFeature` "qty_frac" is being overwritten after being correctly computed.
#### Defined multiple inheritance
`SequenceOutputFeature` inherits from `OutputFeature`:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/sequence_feature.py#L215
`TextOutputFeature` in turn inherits from `SequenceOutputFeature`:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/text_feature.py#L311
#### Modifications to `input_size`
1. In the constructor of `OutputFeature`, the input size is computed correctly based on dependencies:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/base_feature.py#L219
2. In the constructor of `SequenceOutputFeature`, it gets overwritten with the default. The computed dependency information is lost.
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/sequence_feature.py#L230-L232 | 2022-01-22T23:06:45 |
|
ludwig-ai/ludwig | 1,706 | ludwig-ai__ludwig-1706 | [
"1701"
] | 611addc9ec78d8edda47841cc883b819efbc717b | diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py
--- a/ludwig/features/base_feature.py
+++ b/ludwig/features/base_feature.py
@@ -217,6 +217,7 @@ def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, "Ou
logger.debug(" FCStack")
self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)
+ feature["input_size"] = self.input_size # needed for future overrides
self.fc_stack = FCStack(
first_layer_input_size=self.input_size,
diff --git a/ludwig/features/numerical_feature.py b/ludwig/features/numerical_feature.py
--- a/ludwig/features/numerical_feature.py
+++ b/ludwig/features/numerical_feature.py
@@ -343,8 +343,6 @@ class NumericalOutputFeature(NumericalFeatureMixin, OutputFeature):
def __init__(self, feature, output_features: Dict[str, OutputFeature]):
super().__init__(feature, output_features)
self.overwrite_defaults(feature)
-
- feature["input_size"] = self.input_shape[-1]
self.decoder_obj = self.initialize_decoder(feature)
self._setup_loss()
self._setup_metrics()
diff --git a/ludwig/features/set_feature.py b/ludwig/features/set_feature.py
--- a/ludwig/features/set_feature.py
+++ b/ludwig/features/set_feature.py
@@ -178,15 +178,13 @@ def output_shape(self) -> torch.Size:
class SetOutputFeature(SetFeatureMixin, OutputFeature):
decoder = "classifier"
- num_classes = 0
loss = {TYPE: SIGMOID_CROSS_ENTROPY}
metric_functions = {LOSS: None, JACCARD: None}
default_validation_metric = JACCARD
+ num_classes = 0
+ threshold = 0.5
def __init__(self, feature, output_features: Dict[str, OutputFeature]):
- self.num_classes = 0
- self.threshold = 0.5
-
super().__init__(feature, output_features)
self.overwrite_defaults(feature)
self.decoder_obj = self.initialize_decoder(feature)
| diff --git a/tests/integration_tests/test_experiment.py b/tests/integration_tests/test_experiment.py
--- a/tests/integration_tests/test_experiment.py
+++ b/tests/integration_tests/test_experiment.py
@@ -43,6 +43,7 @@
ENCODERS,
generate_data,
generate_output_features_with_dependencies,
+ generate_output_features_with_dependencies_complex,
h3_feature,
HF_ENCODERS,
HF_ENCODERS_SHORT,
@@ -238,6 +239,7 @@ def test_experiment_multilabel_with_class_weights(csv_filename):
generate_output_features_with_dependencies("sequence_feature", ["category_feature", "numerical_feature"]),
# output features with dependencies
generate_output_features_with_dependencies("category_feature", ["sequence_feature"]),
+ generate_output_features_with_dependencies_complex(),
],
)
def test_experiment_multiple_seq_seq(csv_filename, output_features):
diff --git a/tests/integration_tests/utils.py b/tests/integration_tests/utils.py
--- a/tests/integration_tests/utils.py
+++ b/tests/integration_tests/utils.py
@@ -365,6 +365,23 @@ def generate_output_features_with_dependencies(main_feature, dependencies):
return output_features
+def generate_output_features_with_dependencies_complex():
+ """Generates multiple output features specifications with dependencies."""
+
+ tf = text_feature(vocab_size=4, max_len=5, decoder="generator")
+ sf = sequence_feature(vocab_size=4, max_len=5, decoder="generator", dependencies=[tf["name"]])
+ nf = numerical_feature(dependencies=[tf["name"]])
+ vf = vector_feature(dependencies=[sf["name"], nf["name"]])
+ set_f = set_feature(vocab_size=4, dependencies=[tf["name"], vf["name"]])
+ cf = category_feature(vocab_size=4, dependencies=[sf["name"], nf["name"], set_f["name"]])
+
+ # The correct order ids[tf, sf, nf, vf, set_f, cf]
+ # # shuffling it to test the robustness of the topological sort
+ output_features = [nf, tf, set_f, vf, cf, sf, nf]
+
+ return output_features
+
+
def _subproc_wrapper(fn, queue, *args, **kwargs):
fn = cloudpickle.loads(fn)
try:
| Shape mismatch when introducing multiple levels of dependencies
**Describe the bug**
When introducing multiple levels of dependencies, the shape of the _concatenated hidden states_ does not match the _input size for the dense layer of the output feature_.
In my case, the text output feature `qty_frac` depends on text output feature `summary`, and numerical output feature `qty` in turn depends on `qty_frac`.
I get the following error when running `ludwig train`:
```python-traceback
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**To Reproduce**
Minimal, reproducible example using bash and docker as only dependencies:
```bash
#!/usr/bin/env bash
FEATURE_LIST=$(
docker run -i mikefarah/yq -o json -I 0 e '.' - <<EOF
- name: document
type: text
- name: summary
type: text
- name: qty_frac
type: text
- name: qty
type: numerical
EOF
)
mkdir /tmp/ludwig-debug
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
synthesize_dataset \
--features $FEATURE_LIST \
--dataset_size 10 \
--output_path /workdir/synthetic_data.csv
cat <<EOF >/tmp/ludwig-debug/config.yml
input_features:
- name: document
type: text
level: word
output_features:
- name: summary
type: text
level: word
decoder: generator
- name: qty_frac
type: text
level: word
decoder: generator
dependencies:
- summary
- name: qty
type: numerical
dependencies:
- qty_frac
EOF
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
train \
--dataset /workdir/synthetic_data.csv \
--config_file /workdir/config.yml \
--output_directory /workdir/results
```
**Expected behavior**
Training starts without error.
**Screenshots**
Excerpt from the traceback:
```python-traceback
File "/usr/local/lib/python3.7/site-packages/ludwig/features/numerical_feature.py", line 269, in logits
return self.decoder_obj(hidden)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/decoders/generic_decoders.py", line 58, in forward
return self.dense(inputs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 212, in forward
output = torch.squeeze(self.dense(input), dim=-1)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
File "/usr/local/lib/python3.7/site-packages/torch/nn/functional.py", line 1848, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**Environment:**
See reproducible example, run in environment with:
- bash: `GNU bash, version 5.0.17(1)-release (x86_64-pc-linux-gnu)`
- docker: `Docker version 20.10.11+azure-3, build dea9396e184290f638ea873c76db7c80efd5a1d2`
The `ludwigai/ludwig:nightly` Docker image was built from main at 89d18365c41c4ded68edd2095349ce4a6caf5d18.
| For reference, here's the [related conversation on Slack](https://ludwig-ai.slack.com/archives/C01PN6M2RSM/p1642806615331569).
After debugging, I found out that the `input_size` field of `TextOutputFeature` "qty_frac" is being overwritten after being correctly computed.
#### Defined multiple inheritance
`SequenceOutputFeature` inherits from `OutputFeature`:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/sequence_feature.py#L215
`TextOutputFeature` in turn inherits from `SequenceOutputFeature`:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/text_feature.py#L311
#### Modifications to `input_size`
1. In the constructor of `OutputFeature`, the input size is computed correctly based on dependencies:
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/base_feature.py#L219
2. In the constructor of `SequenceOutputFeature`, it gets overwritten with the default. The computed dependency information is lost.
https://github.com/ludwig-ai/ludwig/blob/65b3b1a7cdfc7149a2a8559eef1edbe4d38a3080/ludwig/features/sequence_feature.py#L230-L232 | 2022-01-23T23:27:31 |
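A minimal, self-contained Python sketch of this failure mode (simplified stand-ins, not Ludwig's actual classes; the sizes are chosen to match the 512-vs-768 mismatch in the traceback):
```python
class OutputFeature:
    def __init__(self, feature, dependency_hidden_sizes):
        # 1. computed correctly: own hidden size plus the sizes of all dependencies
        self.input_size = feature["input_size"] + sum(dependency_hidden_sizes)


class SequenceOutputFeature(OutputFeature):
    def __init__(self, feature, dependency_hidden_sizes):
        super().__init__(feature, dependency_hidden_sizes)
        # 2. re-reading the raw config overwrites the dependency-aware value
        self.input_size = feature["input_size"]


qty_frac = SequenceOutputFeature({"input_size": 512}, dependency_hidden_sizes=[256])
print(qty_frac.input_size)  # 512 -- but the concatenated hidden state is 768 wide
```
The patch above addresses exactly this by writing the computed value back into `feature["input_size"]`, so that later overrides pick up the dependency-aware size.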
nh-server/Kurisu | 780 | nh-server__Kurisu-780 | [
"777"
] | 08850695a7470e0c46b2f1e5e50768d2e84d609f | diff --git a/cogs/assistance.py b/cogs/assistance.py
--- a/cogs/assistance.py
+++ b/cogs/assistance.py
@@ -974,7 +974,7 @@ async def tutorial(self, ctx):
async def pokemon(self, ctx):
"""Displays different guides for Pokemon"""
embed = discord.Embed(title="Possible guides for **Pokemon**:", color=discord.Color.red())
- embed.description = "**pkhex**|**pkhax**|**pkgen** Links to PKHeX tutorial\n**randomize** Links to layeredfs randomizing tutorial"
+ embed.description = "**pkhex**|**pkhax**|**pkgen** Links to PKHeX tutorial\n**randomize** Links to layeredfs randomizing tutorial\n**pksm** Links to the PKSM documentation"
await ctx.send(embed=embed)
@tutorial.command(aliases=["pkhax", "pkgen"], cooldown=commands.Cooldown(0, 0, commands.BucketType.channel))
@@ -986,6 +986,15 @@ async def pkhex(self, ctx):
embed.description = "Basic tutorial for PKHeX"
await ctx.send(embed=embed)
+ @tutorial.command(cooldown=commands.Cooldown(0, 0, commands.BucketType.channel))
+ async def pksm(self, ctx):
+ """Links to PKSM Documentation"""
+ embed = discord.Embed(title="PKSM Documentation", color=discord.Color.red())
+ embed.set_thumbnail(url="https://raw.githubusercontent.com/FlagBrew/PKSM/master/assets/banner.png")
+ embed.url = "https://github.com/FlagBrew/PKSM/wiki"
+ embed.description = "Documentation for PKSM"
+ await ctx.send(embed=embed)
+
@tutorial.command(aliases=["randomise"], cooldown=commands.Cooldown(0, 0, commands.BucketType.channel))
async def randomize(self, ctx):
"""Links to layeredfs randomizing tutorial"""
New command idea because I don't know how to make a pull request.
I was thinking of adding a command for PKSM, because some users may have Linux or macOS. Maybe make the command `.tutorial pksm`, and have it link to https://github.com/FlagBrew/PKSM/wiki. Thanks for your time.
| 2020-06-15T11:34:21 |
||
nh-server/Kurisu | 972 | nh-server__Kurisu-972 | [
"967"
] | 5f93961b9d7f08ddca6a8fbf7176b37be0c7b633 | diff --git a/cogs/assistance.py b/cogs/assistance.py
--- a/cogs/assistance.py
+++ b/cogs/assistance.py
@@ -1681,7 +1681,7 @@ async def unsafe_mode(self, ctx):
@commands.command(aliases=['dn'])
async def downgrade(self, ctx, console=None):
"""Why not downgrade"""
- systems = ("nx", "ns", "switch")
+ systems = ("3ds", "nx", "ns", "switch")
channel_name = ctx.channel.name if not isinstance(ctx.channel, discord.DMChannel) else ""
if console not in systems:
if channel_name.startswith(systems):
@@ -1709,7 +1709,7 @@ async def downgrade(self, ctx, console=None):
* Save data compatibility issues.
* Games not launching.
"""))
- await ctx.send(embed=embed)
+ await ctx.send(embed=embed)
def setup(bot):
| Downgrade command
### Added the downgrade command
This uses the same template as the `db` command.
Can be used with `.downgrade` or `.dn`.
*Only the Switch is covered because I don't really know much about other devices. Maybe they could be added in the future, or this could be changed to a Switch-specific command.*

| 2021-07-06T06:43:35 |
||
nh-server/Kurisu | 1,068 | nh-server__Kurisu-1068 | [
"1067"
] | 2dcd0417fa60925ef7937e2147897d5b12de57ee | diff --git a/cogs/results/ctr_support.py b/cogs/results/ctr_support.py
--- a/cogs/results/ctr_support.py
+++ b/cogs/results/ctr_support.py
@@ -47,7 +47,9 @@
# 001: friends module, parental controls, online services in general?
friends = Module('friends', {
102: ResultInfo('This error code indicates you were unexpectedly disconnected from network services.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/17043'),
- 721: ResultInfo('This error code indicates the Parental Controls are set to restrict access to the online feature you\'re attempting to use.', 'https://www.nintendo.com.au/help/3ds-error-codes')
+ 721: ResultInfo('This error code indicates the Parental Controls are set to restrict access to the online feature you\'re attempting to use.', 'https://www.nintendo.com.au/help/3ds-error-codes'),
+ 803: ResultInfo('This error code indicates that the online play server is currently down.', 'https://www.nintendo.co.jp/netinfo/en_US/index.html'),
+ 811: ResultInfo('This error code indicates that the online play server is undergoing maintenance.', 'https://www.nintendo.co.jp/netinfo/en_US/index.html')
})
# 002: bans and other account errors
| [Error Submission] Error 001-0811, 001-0803
**Console**
3DS
**Error Code**
001-0811, 001-0803
**Error Description**
Online play server down.
**Notes**
001-0811 error message:
```
The server is currently undergoing maintenance. We apologize for any inconvenience. Please try again later.
To learn more about maintenance, visit support.nintendo.com.
```
The 001-0803 error message apparently says that the server is "down" rather than "undergoing maintenance".
| 2022-02-28T06:00:11 |
||
nh-server/Kurisu | 1,145 | nh-server__Kurisu-1145 | [
"1043"
] | 41b5b9377946d9be62c27506794a9edbed741244 | diff --git a/cogs/results/ctr_results.py b/cogs/results/ctr_results.py
--- a/cogs/results/ctr_results.py
+++ b/cogs/results/ctr_results.py
@@ -134,7 +134,7 @@
39: ResultInfo('Invalid title version.'),
43: ResultInfo('Database doesn\'t exist, or it failed to open.'),
44: ResultInfo('Trying to uninstall system-app.'),
- 106: ResultInfo('Invalid signature/CIA.'),
+ 106: ResultInfo('Invalid signature/CIA. Usually happens when developer UNITINFO is enabled in Luma3DS.'),
393: ResultInfo('Invalid database.'),
})
diff --git a/cogs/results/ctr_support.py b/cogs/results/ctr_support.py
--- a/cogs/results/ctr_support.py
+++ b/cogs/results/ctr_support.py
@@ -121,7 +121,7 @@
2920: ResultInfo('This error is typically displayed when a Nintendo eShop download failed, or when the title has an invalid ticket. Delete the title and/or its ticket in FBI and install it again from a legitimate source like the Nintendo eShop, or from your game cartridges if using cart dumps.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/41692'),
2924: ResultInfo('Happens when opening eshop with a invalid language setting'),
3049: ResultInfo('The eShop is down for maintenance.', 'https://support.nintendo.com/networkstatus/'),
- 6106: ResultInfo('Occurs when attempting to re-download software from the eshop with an invalid or fake ticket')
+ 6106: ResultInfo('Occurs when attempting to re-download software from the eshop with an invalid or fake ticket. Can also happen when developer UNITINFO is enabled in Luma3DS.')
})
# 009: errors related to (presumably) the eShop application itself
@@ -139,7 +139,7 @@
4077: ResultInfo('Cannot start or continue eShop download. This happens due to insufficient free space being available on the SD Card.'),
4079: ResultInfo('Unable to access SD card.'),
4998: ResultInfo('Local content is newer. Unknown what causes this.'),
- 6106: ResultInfo('AM error in NIM. Bad ticket is likely.'),
+ 6106: ResultInfo('AM error in NIM. Bad ticket is likely. Can also happen when developer UNITINFO is enabled in Luma3DS.'),
8401: ResultInfo('The update data is corrupted. Delete it and reinstall.'),
9001: ResultInfo('Caused by trying to download content with low battery percentage.')
})
diff --git a/cogs/results/wiiu_support.py b/cogs/results/wiiu_support.py
--- a/cogs/results/wiiu_support.py
+++ b/cogs/results/wiiu_support.py
@@ -265,6 +265,7 @@
1101: ResultInfo('Network clock is invalid.'),
2000: ResultInfo('Authentication error.'),
# TODO: 2001-2644 (there aren't really that many errors)
+ 2402: ResultInfo('Invalid NNID. Usually happens if the console is trying to connect to Nintendo servers with a Pretendo Network ID, or Pretendo Network with an NNID.'),
2643: ResultInfo('Authentication is required.'),
2651: ResultInfo('Confirmation code is expired.'),
2661: ResultInfo('Mail address is not validated.'),
@@ -442,6 +443,10 @@
2713: ResultInfo('The USB Storage device has been disconnected.')
})
+vpad_gamepad = Module('vpad (gamepad)', {
+ 9901: ResultInfo('Error when updating a GamePad with a different region from the console.')
+})
+
unknown = Module('unknown/misc.', {
9999: ResultInfo('Usually indicates an invalid signature, ticket, or corrupted data. Typically happens when running an unsigned program without CFW/signature patches.')
})
@@ -465,7 +470,7 @@
151: Module('kpad (wiimote)'),
155: Module('save'),
160: syserr,
- 165: Module('vpad (gamepad)'),
+ 165: vpad_gamepad,
166: Module('aoc (dlc)'),
187: Module('nfp (amiibo)'),
199: unknown
| [Error Submission] Error 165-9901
**Console**
Wii U
**Error Code**
165-9901
**Error Description**
Different-region Wii U GamePad
This error happens when a Wii U GamePad update is attempted with a GamePad whose region differs from the console's, for example:
Wii U region: USA
Wii U GamePad region: EUR
Image of the error:

| 2022-11-08T04:39:06 |
||
nh-server/Kurisu | 1,257 | nh-server__Kurisu-1257 | [
"1256"
] | ea3b1d39d5fc90f9680a01b4b3c2c2a7f7c375ce | diff --git a/cogs/results/ctr_support.py b/cogs/results/ctr_support.py
--- a/cogs/results/ctr_support.py
+++ b/cogs/results/ctr_support.py
@@ -101,6 +101,7 @@
# 006: online matchmaking and gameplay errors
matchmaking = Module('matchmaking', {
112: ResultInfo('Typically displayed when an issue with connecting to Pokémon Bank occurs.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4203/'),
+ 303: ResultInfo('Displayed when the console tried to log in with an invalid user name, i.e. the user name does not exist in the database.'),
332: ResultInfo('Caused by closed ports when attempting matchmaking(?)'),
(501, 502): ResultInfo('This may indicate in issue with the network being used blocking traffic necessary for online play.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4204'),
612: ResultInfo('This error code generally indicates that your network is not optimal for peer to peer connections, likely due to your network\'s NAT type.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/25881'),
| [Error Submission] Error 006-0303
**Console**
3DS
**Error Code**
006-0303
**Error Description**
Unable to connect to the server. Please try again later.
| Related: https://pretendo.network/blog/12-23-23
Wii U error code: 106-0303 | 2023-12-24T17:14:05 |
|
zostera/django-bootstrap4 | 100 | zostera__django-bootstrap4-100 | [
"95"
] | c513e0f7876671b44dc11f17acb4b4317e0fe1a0 | diff --git a/bootstrap4/renderers.py b/bootstrap4/renderers.py
--- a/bootstrap4/renderers.py
+++ b/bootstrap4/renderers.py
@@ -331,6 +331,8 @@ def add_class_attrs(self, widget=None):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
+ elif isinstance(widget, CheckboxInput):
+ classes = add_css_class(classes, "form-check-input", prepend=True)
if self.field.errors:
if self.error_css_class:
@@ -392,13 +394,11 @@ def list_to_class(self, html, klass):
return html
def add_checkbox_label(self, html):
- return "{field}{label}".format(
- field=html,
- label=render_label(
- content=mark_safe(self.field.label),
- label_for=self.field.id_for_label,
- label_title=escape(strip_tags(self.field_help)),
- ),
+ return html + render_label(
+ content=self.field.label,
+ label_for=self.field.id_for_label,
+ label_title=escape(strip_tags(self.field_help)),
+ label_class="form-check-label",
)
def fix_date_select_input(self, html):
@@ -445,7 +445,7 @@ def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
- html = '<div class="checkbox">{content}</div>'.format(content=html)
+ html = '<div class="form-check">{content}</div>'.format(content=html)
return html
def make_input_group_addon(self, inner_class, outer_class, content):
@@ -486,7 +486,7 @@ def make_input_group(self, html):
)
return html
- def append_to_field(self, html):
+ def append_help_and_error(self, html):
field_help = self.field_help or None
field_errors = self.field_errors
if field_help or field_errors:
@@ -503,6 +503,16 @@ def append_to_field(self, html):
html += help_html
return html
+ def append_to_field(self, html):
+ if isinstance(self.widget, CheckboxInput):
+ return html
+ return self.append_help_and_error(html)
+
+ def append_to_checkbox_field(self, html):
+ if not isinstance(self.widget, CheckboxInput):
+ return html
+ return self.append_help_and_error(html)
+
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
@@ -581,6 +591,7 @@ def _render(self):
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
+ html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
| diff --git a/tests/test_templatetags.py b/tests/test_templatetags.py
--- a/tests/test_templatetags.py
+++ b/tests/test_templatetags.py
@@ -2,6 +2,8 @@
from __future__ import unicode_literals
import re
+
+from bs4 import BeautifulSoup
from django import forms
from django.contrib.admin.widgets import AdminSplitDateTime
from django.contrib.gis import forms as gisforms
@@ -441,6 +443,62 @@ def test_password(self):
self.assertIn('type="password"', res)
self.assertIn('placeholder="Password"', res)
+ def test_checkbox(self):
+ """Test Checkbox rendering, because it is special."""
+ def _select_one_element(html, selector, err_msg):
+ lst = html.select(selector)
+ self.assertEqual(len(lst), 1, err_msg)
+ return lst[0]
+
+ res = render_form_field("cc_myself")
+ # strip out newlines and spaces around newlines
+ res = "".join(line.strip() for line in res.split('\n'))
+ res = BeautifulSoup(res, 'html.parser')
+ form_group = _select_one_element(
+ res,
+ ".form-group",
+ "Checkbox should be rendered inside a .form-group.",
+ )
+ form_check = _select_one_element(
+ form_group,
+ ".form-check",
+ "There should be a .form-check inside .form-group",
+ )
+ checkbox = _select_one_element(
+ form_check,
+ "input",
+ "The checkbox should be inside the .form-check",
+ )
+ self.assertIn(
+ "form-check-input",
+ checkbox["class"],
+ "The checkbox should have the class 'form-check-input'.",
+ )
+ label = checkbox.nextSibling
+ self.assertIsNotNone(label, "The label should be rendered after the checkbox.")
+ self.assertEqual(label.name, "label", "After the checkbox there should be a label.")
+ self.assertEqual(
+ label["for"],
+ checkbox["id"],
+ "The for attribute of the label should be the id of the checkbox.",
+ )
+ help_text = label.nextSibling
+ self.assertIsNotNone(help_text, "The help text should be rendered after the label.")
+ self.assertEqual(
+ help_text.name,
+ "small", "The help text should be rendered as <small> tag.",
+ )
+ self.assertIn(
+ "form-text",
+ help_text["class"],
+ "The help text should have the class 'form-text'.",
+ )
+ self.assertIn(
+ "text-muted",
+ help_text["class"],
+ "The help text should have the class 'text-muted'.",
+ )
+
def test_required_field(self):
required_css_class = "bootstrap4-req"
required_field = render_form_field("subject")
@@ -718,13 +776,13 @@ def test_for_formset(self):
class PaginatorTest(TestCase):
def test_url_replace_param(self):
- self.assertEquals(
+ self.assertEqual(
url_replace_param("/foo/bar?baz=foo", "baz", "yohoo"), "/foo/bar?baz=yohoo"
)
- self.assertEquals(
+ self.assertEqual(
url_replace_param("/foo/bar?baz=foo", "baz", None), "/foo/bar"
)
- self.assertEquals(
+ self.assertEqual(
url_replace_param("/foo/bar#id", "baz", "foo"), "/foo/bar?baz=foo#id"
)
| Different div class for checkboxes
Bootstrap's docs use a different div class for checkboxes.
https://getbootstrap.com/docs/4.1/components/forms/#overview
In the 4.1 docs it is "form-group form-check", but the library generates only the "form-group" div.
| For Bootstrap 4.0 (https://getbootstrap.com/docs/4.0/components/forms/#overview) it is only the "form-check" div class.
Alternatively you can use the [custom checkbox](https://getbootstrap.com/docs/4.1/components/forms/#checkboxes) like this:
```html
<div class="custom-control custom-checkbox">
<input type="checkbox" class="custom-control-input" id="id_test_checkbox">
<label class="custom-control-label" for="id_test_checkbox">Check this custom checkbox</label>
</div>
``` | 2018-07-14T22:22:39 |
zostera/django-bootstrap4 | 114 | zostera__django-bootstrap4-114 | [
"105"
] | 727a59ca4312c6ee0b91c51566eab35bb585ac5c | diff --git a/bootstrap4/bootstrap.py b/bootstrap4/bootstrap.py
--- a/bootstrap4/bootstrap.py
+++ b/bootstrap4/bootstrap.py
@@ -8,12 +8,16 @@
# Default settings
BOOTSTRAP4_DEFAULTS = {
- "base_url": None, # 'https://maxcdn.bootstrapcdn.com/bootstrap/4.1.1/'
"css_url": {
"href": "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap.min.css",
"integrity": "sha384-WskhaSGFgHYWDcbwN70/dfYBj47jz9qbsMId/iRN3ewGhXQFZCSftd1LZCfmhktB",
"crossorigin": "anonymous",
},
+ "javascript_url": {
+ "url": "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/js/bootstrap.min.js",
+ "integrity": "sha384-smHYKdLADwkXOn1EmN1qk/HfnUcbVRZyYmZ4qpPea6sjB/pTJ0euyQp0Mk8ck+5T",
+ "crossorigin": "anonymous",
+ },
"theme_url": None,
"jquery_url": {
"url": "https://code.jquery.com/jquery-3.3.1.min.js",
@@ -30,11 +34,6 @@
"integrity": "sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49",
"crossorigin": "anonymous",
},
- "javascript_url": {
- "url": "https://stackpath.bootstrapcdn.com/bootstrap/4.1.1/js/bootstrap.min.js",
- "integrity": "sha384-smHYKdLADwkXOn1EmN1qk/HfnUcbVRZyYmZ4qpPea6sjB/pTJ0euyQp0Mk8ck+5T",
- "crossorigin": "anonymous",
- },
"javascript_in_head": False,
"include_jquery": False,
"use_i18n": False,
@@ -69,13 +68,6 @@ def get_bootstrap_setting(name, default=None):
return BOOTSTRAP4.get(name, default)
-def bootstrap_url(postfix):
- """
- Prefix a relative url with the bootstrap base url
- """
- return get_bootstrap_setting("base_url") + postfix
-
-
def jquery_url():
"""
Return the full url to jQuery library file to use
@@ -110,16 +102,14 @@ def javascript_url():
"""
Return the full url to the Bootstrap JavaScript file
"""
- url = get_bootstrap_setting("javascript_url")
- return url if url else bootstrap_url("js/bootstrap.min.js")
+ return get_bootstrap_setting("javascript_url")
def css_url():
"""
Return the full url to the Bootstrap CSS file
"""
- url = get_bootstrap_setting("css_url")
- return url if url else bootstrap_url("css/bootstrap.min.css")
+ return get_bootstrap_setting("css_url")
def theme_url():
| diff --git a/tests/test_templatetags.py b/tests/test_templatetags.py
--- a/tests/test_templatetags.py
+++ b/tests/test_templatetags.py
@@ -232,17 +232,6 @@ def test_bootstrap_css_tag(self):
'<link rel="stylesheet" href="//example.com/theme.css">', html
)
- @override_settings(BOOTSTRAP4={"base_url": "//example.com/", "css_url": None})
- def test_bootstrap_css_from_base_url(self):
- self.assertEqual(
- render_template_with_form("{% bootstrap_css_url %}").strip(),
- "//example.com/css/bootstrap.min.css",
- )
- self.assertInHTML(
- '<link href="//example.com/css/bootstrap.min.css" rel="stylesheet">',
- render_template_with_form("{% bootstrap_css %}").strip(),
- )
-
def test_settings_filter(self):
res = render_template_with_form('{{ "required_css_class"|bootstrap_setting }}')
self.assertEqual(res.strip(), "bootstrap4-req")
| Setting base_url is not used
I tried to change the Bootstrap 4 version in use, so I added the following dict to settings.py:
```python
BOOTSTRAP4 = {
    # The Bootstrap base URL
    'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/4.1.3/',
}
```
But the `{% bootstrap_css %}` tag still includes version 4.0.0:
`<link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" rel="stylesheet">`
| Do you have a `css_url` defined in settings?
No, the `css_url` is not set in my settings file. As I understood the documentation, if I don't set a `css_url`, the URL will be built from the `base_url`. Is this correct?
Yes, that's why I asked. I'll investigate.
Could you check whether it works if you *explicitly* set `css_url` to `None`? I think the problem is that there is a default value for `css_url`.
I've confirmed this is due to a change in the defaults. The CDN version of BS4 used to be provided by a `base_url`. In order to accommodate crossorigin/integrity, the defaults are now in a `css_url` and `javascript_url`.
This will require a fix, and the fix may require some discussion.
Workaround for now: explicitly set `css_url` to `None` in your settings, and your `base_url` will work.
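In `settings.py` terms, that workaround would look roughly like this (a sketch using the version from the report above; judging from the removed fallback code in the diff, `javascript_url` can be nulled the same way):
```python
# settings.py -- workaround sketch: null the CDN defaults so base_url is used
BOOTSTRAP4 = {
    "base_url": "//maxcdn.bootstrapcdn.com/bootstrap/4.1.3/",
    "css_url": None,         # falls back to base_url + "css/bootstrap.min.css"
    "javascript_url": None,  # falls back to base_url + "js/bootstrap.min.js"
}
```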
Suggestion (thanks @jieter). Let's get rid of `base_url` as a magic way to include stuff, and just have an explicit `javascript_url` and `css_url` that default to the CDN. This would be a breaking change.
@pacnos Would that be an acceptable direction for you? | 2018-08-28T18:06:18 |
zostera/django-bootstrap4 | 117 | zostera__django-bootstrap4-117 | [
"115"
] | fb20ec895c08a6397bcbb90b4f7ee6ed2d19f89e | diff --git a/bootstrap4/renderers.py b/bootstrap4/renderers.py
--- a/bootstrap4/renderers.py
+++ b/bootstrap4/renderers.py
@@ -533,12 +533,14 @@ def get_label_class(self):
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
- if not self.show_label:
+ if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
- if isinstance(self.widget, CheckboxInput):
+ if self.show_label == "skip":
+ return None
+ elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
diff --git a/bootstrap4/templatetags/bootstrap4.py b/bootstrap4/templatetags/bootstrap4.py
--- a/bootstrap4/templatetags/bootstrap4.py
+++ b/bootstrap4/templatetags/bootstrap4.py
@@ -529,6 +529,10 @@ def bootstrap_field(*args, **kwargs):
show_label
Whether the show the label of the field.
+ * ``True``
+ * ``False``/``'sr-only'``
+ * ``'skip'``
+
:default: ``True``
exclude
| diff --git a/tests/test_templatetags.py b/tests/test_templatetags.py
--- a/tests/test_templatetags.py
+++ b/tests/test_templatetags.py
@@ -770,13 +770,27 @@ def test_button(self):
class ShowLabelTest(TestCase):
- def test_show_label(self):
+ def test_show_label_false(self):
form = TestForm()
res = render_template_with_form(
"{% bootstrap_form form show_label=False %}", {"form": form}
)
self.assertIn("sr-only", res)
+ def test_show_label_sr_only(self):
+ form = TestForm()
+ res = render_template_with_form(
+ "{% bootstrap_form form show_label='sr-only' %}", {"form": form}
+ )
+ self.assertIn("sr-only", res)
+
+ def test_show_label_skip(self):
+ form = TestForm()
+ res = render_template_with_form(
+ "{% bootstrap_form form show_label='skip' %}", {"form": form}
+ )
+ self.assertNotIn("<label>", res)
+
def test_for_formset(self):
TestFormSet = formset_factory(TestForm, extra=1)
test_formset = TestFormSet()
Feature request: Allow skipping the field label completely
```
{% bootstrap_field field show_label=False %}
```
currently still renders the label, but with `class="sr-only"`. This is great for most cases, but it is actually a bad idea if the user manually adds the label separately. In that case, both labels will be associated with the form field and the computed aria-label will be "{label} {label}".
| I agree on this. Can you propose an API for this using a pull request?
A straightforward API would be `show_label=True|False|'sr-only'`, but that would be a breaking change.
Maybe `show_label='default'|'sr-only'|'hidden'` where `True` maps to `'default'` and `False` maps to `'sr-only'` for backwards compatibility.
Not sure about 'hidden' since it implies the current `False` to me... | 2018-09-05T21:33:48 |
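The patch above settles on `True`, `False`/`'sr-only'`, and a new `'skip'` value. A usage sketch (assuming a configured Django project with `bootstrap4` in `INSTALLED_APPS`):
```python
from django import forms
from django.template import Context, Template


class SubjectForm(forms.Form):
    subject = forms.CharField()


html = Template(
    "{% load bootstrap4 %}"
    "{% bootstrap_field form.subject show_label='sr-only' %}"  # label kept for screen readers
    "{% bootstrap_field form.subject show_label='skip' %}"     # no <label> rendered at all
).render(Context({"form": SubjectForm()}))
print(html)
```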
zostera/django-bootstrap4 | 122 | zostera__django-bootstrap4-122 | [
"98"
] | de77ccc74ee9a8507d8e7e279866833caf5af6f5 | diff --git a/bootstrap4/renderers.py b/bootstrap4/renderers.py
--- a/bootstrap4/renderers.py
+++ b/bootstrap4/renderers.py
@@ -435,7 +435,8 @@ def make_input_group_addon(self, inner_class, outer_class, content):
addon_class=outer_class, addon=content
)
- def make_input_group(self, html):
+ @property
+ def is_input_group(self):
allowed_widget_types = (
TextInput,
PasswordInput,
@@ -444,10 +445,14 @@ def make_input_group(self, html):
Select,
EmailInput,
)
- if (self.addon_before or self.addon_after) and isinstance(
- self.widget, allowed_widget_types
- ):
- html = '<div class="input-group">{before}{html}{after}</div>'.format(
+ return (
+ (self.addon_before or self.addon_after)
+ and isinstance(self.widget, allowed_widget_types)
+ )
+
+ def make_input_group(self, html):
+ if self.is_input_group:
+ html = '{before}{html}{after}'.format(
before=self.make_input_group_addon(
self.addon_before_class, "input-group-prepend", self.addon_before
),
@@ -456,18 +461,18 @@ def make_input_group(self, html):
),
html=html,
)
+ html = self.append_errors(html)
+ html = '<div class="input-group">{html}</div>'.format(html=html)
return html
- def append_help_and_error(self, html):
+ def append_help(self, html):
field_help = self.field_help or None
- field_errors = self.field_errors
- if field_help or field_errors:
+ if field_help:
help_html = render_template_file(
- "bootstrap4/field_help_text_and_errors.html",
+ "bootstrap4/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
- "field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
@@ -475,15 +480,40 @@ def append_help_and_error(self, html):
html += help_html
return html
+ def append_errors(self, html):
+ field_errors = self.field_errors
+ if field_errors:
+ errors_html = render_template_file(
+ "bootstrap4/field_errors.html",
+ context={
+ "field": self.field,
+ "field_errors": field_errors,
+ "layout": self.layout,
+ "show_help": self.show_help,
+ },
+ )
+ html += errors_html
+ return html
+
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
+ # we have already appended errors and help to checkboxes
+ # in append_to_checkbox_field
return html
- return self.append_help_and_error(html)
+
+ if not self.is_input_group:
+ # we already appended errors for input groups in make_input_group
+ html = self.append_errors(html)
+
+ return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
+ # we will append errors and help to normal fields later in append_to_field
return html
- return self.append_help_and_error(html)
+
+ html = self.append_errors(html)
+ return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
| diff --git a/tests/test_templatetags.py b/tests/test_templatetags.py
--- a/tests/test_templatetags.py
+++ b/tests/test_templatetags.py
@@ -114,14 +114,20 @@ def render_template_with_bootstrap(text, context=None):
return render_template("{% load bootstrap4 %}" + text, context)
-def render_template_with_form(text, context=None):
+def render_template_with_form(text, context=None, data=None):
"""
Create a template ``text`` that first loads bootstrap4.
+
+ When ``data`` is given, the form will be initialized with data and
+ form.is_valid() will be called in order to enable validations.
"""
if not context:
context = {}
if "form" not in context:
- context["form"] = TestForm()
+ form = TestForm(data=data)
+ if data:
+ form.is_valid()
+ context["form"] = form
return render_template_with_bootstrap(text, context)
@@ -402,6 +408,17 @@ def test_radio_select_button_group(self):
class FieldTest(TestCase):
+
+ def _select_one_element(self, html, selector, err_msg):
+ """
+ Select exactly one html element in an BeautifulSoup html fragment.
+
+ Fail if there is not exactly one element.
+ """
+ lst = html.select(selector)
+ self.assertEqual(len(lst), 1, err_msg)
+ return lst[0]
+
def test_illegal_field(self):
with self.assertRaises(BootstrapError):
render_field(field="illegal")
@@ -444,23 +461,19 @@ def test_password(self):
def test_checkbox(self):
"""Test Checkbox rendering, because it is special."""
-
- def _select_one_element(html, selector, err_msg):
- lst = html.select(selector)
- self.assertEqual(len(lst), 1, err_msg)
- return lst[0]
-
res = render_form_field("cc_myself")
# strip out newlines and spaces around newlines
res = "".join(line.strip() for line in res.split("\n"))
res = BeautifulSoup(res, "html.parser")
- form_group = _select_one_element(
+ form_group = self._select_one_element(
res, ".form-group", "Checkbox should be rendered inside a .form-group."
)
- form_check = _select_one_element(
- form_group, ".form-check", "There should be a .form-check inside .form-group"
+ form_check = self._select_one_element(
+ form_group,
+ ".form-check",
+ "There should be a .form-check inside .form-group",
)
- checkbox = _select_one_element(
+ checkbox = self._select_one_element(
form_check, "input", "The checkbox should be inside the .form-check"
)
self.assertIn(
@@ -542,6 +555,43 @@ def test_input_group_addon_empty(self):
'<div class="input-group-append"><span class="input-group-text">after</span></div>', res
)
+ def test_input_group_addon_validation(self):
+ """
+ Test that invalid-feedback messages are placed inside input-groups.
+
+ See issue #89.
+ """
+ # invalid form data:
+ data = {'subject': ''}
+ res = render_template_with_form(
+ '{% bootstrap_field form.subject addon_before=None addon_after="after" %}',
+ data=data,
+ ) # noqa
+ res = BeautifulSoup(res, "html.parser")
+ self._select_one_element(
+ res,
+ '.input-group > .invalid-feedback',
+ 'The invalid-feedback message, complaining that this field is '
+ 'required, must be placed inside the input-group',
+ )
+ self._select_one_element(
+ res,
+ '.form-group > .form-text',
+ 'The form-text message must be placed inside the form-group',
+ )
+ self.assertEqual(
+ len(res.select('.form-group > .invalid-feedback')),
+ 0,
+ 'The invalid-feedback message must be placed inside the '
+ 'input-group and not inside the form-group',
+ )
+ self.assertEqual(
+ len(res.select('.input-group > .form-text')),
+ 0,
+ 'The form-text message must be placed inside the form-group and '
+ 'not inside the input-group',
+ )
+
def test_size(self):
def _test_size(param, klass):
res = render_template_with_form(
| Field error does not render when addon_before or addon_after is used
The field error does not render when `addon_before` or `addon_after` is used: the invalid CSS styling (`is-invalid`) is applied, but the error message itself does not show.
Example
```
{% bootstrap_field form.user addon_after="<div class='side-pop-btn-group'><a href='javascript:;' data-type='user-edit' class='p-2 text-warning' data-toggle='tooltip' title='Edit User'><i class='fal fa-pencil-alt'></i></a><a href='javascript:;' class='p-2 text-success' data-type='user-add' data-toggle='tooltip' title='Add User'><i class='fas fa-plus'></i></a><a href='javascript:;' class='p-2 text-danger' data-type='user-delete' data-toggle='tooltip' title='Delete User'><i class='fas fa-times'></i></a></div>" %}
```
| 2018-10-23T22:24:27 |
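A reproduction sketch along the lines of the regression test added above (field names chosen to match the report; assumes a configured Django project with `bootstrap4` in `INSTALLED_APPS`):
```python
from django import forms
from django.template import Context, Template


class UserForm(forms.Form):
    user = forms.CharField(required=True)


form = UserForm(data={"user": ""})
form.is_valid()  # trigger validation so the field has errors

html = Template(
    '{% load bootstrap4 %}{% bootstrap_field form.user addon_after="after" %}'
).render(Context({"form": form}))

# Before the fix, the .invalid-feedback element was rendered outside the
# .input-group, where Bootstrap 4's CSS keeps it hidden; the patch moves it inside.
print(html)
```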
|
zostera/django-bootstrap4 | 135 | zostera__django-bootstrap4-135 | [
"134"
] | 16a33caa40110fed31f2d40a3c156dc432201651 | diff --git a/bootstrap4/renderers.py b/bootstrap4/renderers.py
--- a/bootstrap4/renderers.py
+++ b/bootstrap4/renderers.py
@@ -266,7 +266,7 @@ def __init__(self, field, *args, **kwargs):
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
- self.placeholder = text_value(mark_safe(self.placeholder))
+ self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get(
"addon_before", self.widget.attrs.pop("addon_before", "")
| diff --git a/tests/test_templatetags.py b/tests/test_templatetags.py
--- a/tests/test_templatetags.py
+++ b/tests/test_templatetags.py
@@ -40,7 +40,11 @@ class TestForm(forms.Form):
max_length=100,
help_text="my_help_text",
required=True,
- widget=forms.TextInput(attrs={"placeholder": "placeholdertest"}),
+ widget=forms.TextInput(attrs={"placeholder": "placeholdertest"})
+ )
+ xss_field = forms.CharField(
+ label='XSS" onmouseover="alert(\'Hello, XSS\')" foo="',
+ max_length=100,
)
password = forms.CharField(widget=forms.PasswordInput)
message = forms.CharField(required=False, help_text="<i>my_help_text</i>")
@@ -442,6 +446,18 @@ def test_subject(self):
self.assertIn('type="text"', res)
self.assertIn('placeholder="placeholdertest"', res)
+ def test_xss_field(self):
+ res = render_form_field("xss_field")
+ self.assertIn('type="text"', res)
+ self.assertIn(
+ '<label for="id_xss_field">XSS" onmouseover="alert('Hello, XSS')" foo="</label>',
+ res,
+ )
+ self.assertIn(
+ 'placeholder="XSS" onmouseover="alert('Hello, XSS')" foo=""',
+ res,
+ )
+
def test_password(self):
res = render_form_field("password")
self.assertIn('type="password"', res)
| XSS in placeholder values
Placeholder values are run through `mark_safe` when building form widgets. That's not a good idea, as it permits XSS content. As placeholders are often generated automatically from labels, and labels are properly escaped, this is unexpected behaviour.
This behaviour was introduced in 5b98fd5e5a750bb8588eb92569db324b736c736e. The commit doesn't give a reason for the introduction (and neither does the PR at https://github.com/dyve/django-bootstrap3/pull/385).
I've included a fix in #135, including tests so you can see the behaviour for yourself.
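A sketch of the problem (form and field names are illustrative; the label value is taken from the test added in #135; assumes a configured Django project with django-bootstrap4's default behaviour of deriving the placeholder from the label, as noted above):
```python
from django import forms
from django.template import Context, Template


class ProfileForm(forms.Form):
    # untrusted label text, e.g. coming from user-supplied configuration
    nickname = forms.CharField(label='XSS" onmouseover="alert(\'Hello, XSS\')" foo="')


html = Template(
    "{% load bootstrap4 %}{% bootstrap_field form.nickname %}"
).render(Context({"form": ProfileForm()}))

# With the placeholder run through mark_safe(), the quotes were not escaped and the
# onmouseover handler broke out of the placeholder attribute; after the fix the
# value renders escaped, exactly as the new test asserts.
print(html)
```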
| 2019-02-24T16:00:26 |