repo (stringclasses, 856 values) | pull_number (int64, 3 to 127k) | instance_id (stringlengths, 12 to 58) | issue_numbers (sequencelengths, 1 to 5) | base_commit (stringlengths, 40) | patch (stringlengths, 67 to 1.54M) | test_patch (stringlengths, 0 to 107M) | problem_statement (stringlengths, 3 to 307k) | hints_text (stringlengths, 0 to 908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
readthedocs/readthedocs.org | 9,860 | readthedocs__readthedocs.org-9860 | [
"8301"
] | a8e88277353460269873fa7e38789c8c7d2e4dee | diff --git a/readthedocs/embed/v3/views.py b/readthedocs/embed/v3/views.py
--- a/readthedocs/embed/v3/views.py
+++ b/readthedocs/embed/v3/views.py
@@ -1,6 +1,7 @@
"""Views for the EmbedAPI v3 app."""
import re
+import urllib.parse
from urllib.parse import urlparse
import requests
@@ -90,11 +91,16 @@ def _get_page_content_from_storage(self, project, version_slug, filename):
include_file=False,
version_type=version.type,
)
+
+ # Decode encoded URLs (e.g. convert %20 into a whitespace)
+ filename = urllib.parse.unquote(filename)
+
relative_filename = filename.lstrip("/")
file_path = build_media_storage.join(
storage_path,
relative_filename,
)
+
try:
with build_media_storage.open(file_path) as fd: # pylint: disable=invalid-name
return fd.read()
| diff --git a/readthedocs/embed/v3/tests/test_internal_pages.py b/readthedocs/embed/v3/tests/test_internal_pages.py
--- a/readthedocs/embed/v3/tests/test_internal_pages.py
+++ b/readthedocs/embed/v3/tests/test_internal_pages.py
@@ -78,3 +78,20 @@ def test_default_main_section(self, storage_exists, storage_open, app, client):
'content': content,
'external': False,
}
+
+ @pytest.mark.sphinx("html", srcdir=srcdir, freshenv=False)
+ @mock.patch("readthedocs.embed.v3.views.build_media_storage.open")
+ @mock.patch("readthedocs.embed.v3.views.build_media_storage.exists")
+ def test_s3_storage_decoded_filename(
+ self, storage_exists, storage_open, app, client
+ ):
+ storage_exists.return_value = True
+ storage_open.side_effect = self._mock_open('<div id="section">content</div>')
+
+ params = {
+ "url": "https://project.readthedocs.io/en/latest/My%20Spaced%20File.html#section",
+ }
+ response = client.get(self.api_url, params)
+ assert response.status_code == 200
+
+ storage_open.assert_called_once_with("html/project/latest/My Spaced File.html")
| Embed API: gets confused with spaces in the URL
Reading #8283 I found that Embed API gets confused with this link: https://github.com/readthedocs/readthedocs.org/pull/8283/files#diff-30a1066ce8ef6760deed6b1dcb492eda05b8dd6cf71dadddab6c797c874be6edR121
In the JS console I see https://readthedocs.org/api/v2/embed/?url=https%3A%2F%2Fipywidgets.readthedocs.io%2Fen%2F7.6.3%2Fexamples%2FWidget%2520List.html%23IntSlider which fails to retrieve with the error:
```
Can't find content for section: doc=examples/Widget%20List path=examples/Widget%20List.html section=IntSlider
```
However, changing it to https://readthedocs.org/api/v2/embed/?url=https://ipywidgets.readthedocs.io/en/7.6.3/examples/Widget%20List.html%23IntSlider it does work and _it does return content_.
| Found the issue! 💪🏼
Filenames with encoded whitespace break when they are retrieved from the S3 storage. The following code works fine:
```python
file_path = 'html/ipywidgets/7.6.3/examples/Widget List.html'
build_media_storage.open(file_path)
```
but encoding the ` ` as `%20` does not:
```python
file_path = 'html/ipywidgets/7.6.3/examples/Widget%20List.html'
build_media_storage.open(file_path)
```
So, the solution here is to _decode_ the `file_path` before passing it to the S3 storage backend.
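For illustration, `urllib.parse.unquote` from the standard library is enough to turn the encoded path back into the plain one:
```python
>>> import urllib.parse
>>> urllib.parse.unquote("html/ipywidgets/7.6.3/examples/Widget%20List.html")
'html/ipywidgets/7.6.3/examples/Widget List.html'
```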
This happens at https://github.com/readthedocs/readthedocs.org/blob/a8e88277353460269873fa7e38789c8c7d2e4dee/readthedocs/embed/v3/views.py#L99 | 2023-01-04T15:12:17 |
readthedocs/readthedocs.org | 9,962 | readthedocs__readthedocs.org-9962 | [
"9935"
] | bfc2c2d03bbd0ad91e03d83346b7f64df88ee7d9 | diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py
--- a/readthedocs/doc_builder/backends/mkdocs.py
+++ b/readthedocs/doc_builder/backends/mkdocs.py
@@ -295,10 +295,19 @@ def build(self):
'-m',
'mkdocs',
self.builder,
- '--clean',
- '--site-dir',
- self.build_dir,
- '--config-file',
+ "--clean",
+ # ``site_dir`` is relative to where the mkdocs.yaml file is
+ # https://www.mkdocs.org/user-guide/configuration/#site_dir
+ # Example:
+ #
+ # when ``--config-file=docs/mkdocs.yml``,
+ # it must be ``--site-dir=../_readthedocs/html``
+ "--site-dir",
+ os.path.join(
+ os.path.relpath(self.project_path, os.path.dirname(self.yaml_file)),
+ self.build_dir,
+ ),
+ "--config-file",
os.path.relpath(self.yaml_file, self.project_path),
]
if self.config.mkdocs.fail_on_warning:
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -1257,7 +1257,7 @@ def test_mkdocs_fail_on_warning(self, load_yaml_config):
"build",
"--clean",
"--site-dir",
- "_readthedocs/html",
+ "../_readthedocs/html",
"--config-file",
"docs/mkdocs.yaml",
"--strict", # fail on warning flag
| MkDocs: artifacts are not found when the YAML file is not in the root directory
## Details
* Read the Docs project URL: https://readthedocs.org/projects/civicactions-handbook/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/dmundra/
## Expected Result
*A description of what you wanted to happen*
We expect that pull request sites are loaded and the changes merged to the main branch are loaded on the site as well.
## Actual Result
*A description of what actually happened*
For the first issue, here is the latest pull request example: https://github.com/CivicActions/guidebook/pull/1033. We reset the rtd token as well but that didn't matter. The build passes https://readthedocs.org/projects/civicactions-handbook/builds/19264729/ but no site has been created.
For the second issue, most recently (https://readthedocs.org/projects/civicactions-handbook/builds/19264408/) we published some changes, but those changes do not appear on the live site https://guidebook.civicactions.com/en/latest/about-civicactions/diversity-equity-inclusion/resources/
Any recommendations for troubleshooting?
| > For the first issue, here is the latest pull request example: [CivicActions/guidebook#1033](https://github.com/CivicActions/guidebook/pull/1033). We reset the rtd token as well but that didn't matter. The build passes [readthedocs.org/projects/civicactions-handbook/builds/19264729](https://readthedocs.org/projects/civicactions-handbook/builds/19264729/) but no site has been created.
This looks like a temporary issue, I'd say. I just opened a testing PR at https://github.com/readthedocs/test-builds/pull/2095, triggered this build https://readthedocs.org/projects/test-builds/builds/19267182/ and generated this documentation https://test-builds--2095.org.readthedocs.build/en/2095/
Can you please take a look at the suggestion from https://docs.readthedocs.io/en/latest/pull-requests.html#troubleshooting just in case?
> The second issue most recently [readthedocs.org/projects/civicactions-handbook/builds/19264408](https://readthedocs.org/projects/civicactions-handbook/builds/19264408/) we published some changes, but those changes do not appear on the live site [guidebook.civicactions.com/en/latest/about-civicactions/diversity-equity-inclusion/resources](https://guidebook.civicactions.com/en/latest/about-civicactions/diversity-equity-inclusion/resources/)
What are the differences we should be checking for? Can you point us at specific page where the updated content should be there and it's not?
Thanks for the response. Yes, we tried the troubleshooting steps and I triggered a build this morning to confirm whether it was a temporary issue, but the builds are not progressing past the triggered state (for both latest and the pull request), builds:
- https://readthedocs.org/projects/civicactions-handbook/builds/19271175/
- https://readthedocs.org/projects/civicactions-handbook/builds/19271225/
> What are the differences we should be checking for? Can you point us at specific page where the updated content should be there and it's not?
Ya, we are looking for changes from this commit https://github.com/CivicActions/guidebook/commit/a614e7b1aaa30a9016c66cd7e4238338ffcc1451 to appear on this page https://guidebook.civicactions.com/en/latest/about-civicactions/diversity-equity-inclusion/resources/. For example we should see 'Code of conduct' text and we don't.
Here is another pull request https://github.com/CivicActions/guidebook/pull/1034 that generated a build but the site doesn't appear https://civicactions-handbook--1034.org.readthedocs.build/en/1034/
@dmundra thanks for providing feedback here!
We're investigating a possible regression related to your use of a custom MkDocs configuration path `.config/mkdocs.yml`; it seems that a recent change to our builders has affected something. We'll get back with more info :+1:
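The patch at the top of this record computes `--site-dir` relative to the directory that contains the MkDocs YAML file. A quick sketch of the path arithmetic it relies on, using the `docs/mkdocs.yml` layout from the diff's comment (the checkout path below is just a hypothetical example):
```python
>>> import os
>>> project_path = "/checkout"                  # repository checkout root (hypothetical)
>>> yaml_file = "/checkout/docs/mkdocs.yml"     # custom config file location
>>> build_dir = "_readthedocs/html"
>>> os.path.join(os.path.relpath(project_path, os.path.dirname(yaml_file)), build_dir)
'../_readthedocs/html'
```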
Thanks @benjaoming for the update. | 2023-01-30T18:19:22 |
readthedocs/readthedocs.org | 9,967 | readthedocs__readthedocs.org-9967 | [
"9886"
] | 21a40332a61811894aa66569e6d3c12de7bfbcb4 | diff --git a/readthedocs/doc_builder/backends/sphinx.py b/readthedocs/doc_builder/backends/sphinx.py
--- a/readthedocs/doc_builder/backends/sphinx.py
+++ b/readthedocs/doc_builder/backends/sphinx.py
@@ -336,38 +336,6 @@ def sphinx_parallel_arg(self):
return ['-j', 'auto']
return []
- def venv_sphinx_supports_latexmk(self):
- """
- Check if ``sphinx`` from the user's venv supports ``latexmk``.
-
- If the version of ``sphinx`` is greater or equal to 1.6.1 it returns
- ``True`` and ``False`` otherwise.
-
- See: https://www.sphinx-doc.org/en/master/changes.html#release-1-6-1-released-may-16-2017
- """
-
- command = [
- self.python_env.venv_bin(filename='python'),
- '-c',
- (
- '"'
- 'import sys; '
- 'import sphinx; '
- 'sys.exit(0 if sphinx.version_info >= (1, 6, 1) else 1)'
- '"'
- ),
- ]
-
- cmd_ret = self.run(
- *command,
- bin_path=self.python_env.venv_bin(),
- cwd=self.project_path,
- escape_command=False, # used on DockerBuildCommand
- shell=True, # used on BuildCommand
- record=False,
- )
- return cmd_ret.exit_code == 0
-
class HtmlBuilder(BaseSphinx):
relative_output_dir = "_readthedocs/html"
@@ -548,12 +516,7 @@ def build(self):
raise BuildUserError("No TeX files were found.")
# Run LaTeX -> PDF conversions
- # Build PDF with ``latexmk`` if Sphinx supports it, otherwise fallback
- # to ``pdflatex`` to support old versions
- if self.venv_sphinx_supports_latexmk():
- success = self._build_latexmk(self.project_path)
- else:
- success = self._build_pdflatex(tex_files)
+ success = self._build_latexmk(self.project_path)
self._post_build()
return success
@@ -625,59 +588,6 @@ def _build_latexmk(self, cwd):
return cmd_ret.successful
- def _build_pdflatex(self, tex_files):
- pdflatex_cmds = [
- ['pdflatex', '-interaction=nonstopmode', tex_file]
- for tex_file in tex_files
- ] # yapf: disable
- makeindex_cmds = [
- [
- "makeindex",
- "-s",
- "python.ist",
- "{}.idx".format(
- os.path.splitext(
- os.path.relpath(tex_file, self.absolute_output_dir)
- )[0],
- ),
- ]
- for tex_file in tex_files
- ] # yapf: disable
-
- if self.build_env.command_class == DockerBuildCommand:
- latex_class = DockerLatexBuildCommand
- else:
- latex_class = LatexBuildCommand
- pdf_commands = []
- for cmd in pdflatex_cmds:
- cmd_ret = self.build_env.run_command_class(
- cls=latex_class,
- cmd=cmd,
- cwd=self.absolute_output_dir,
- warn_only=True,
- )
- pdf_commands.append(cmd_ret)
- for cmd in makeindex_cmds:
- cmd_ret = self.build_env.run_command_class(
- cls=latex_class,
- cmd=cmd,
- cwd=self.absolute_output_dir,
- warn_only=True,
- )
- pdf_commands.append(cmd_ret)
- for cmd in pdflatex_cmds:
- cmd_ret = self.build_env.run_command_class(
- cls=latex_class,
- cmd=cmd,
- cwd=self.absolute_output_dir,
- warn_only=True,
- )
- pdf_match = PDF_RE.search(cmd_ret.output)
- if pdf_match:
- self.pdf_file_name = pdf_match.group(1).strip()
- pdf_commands.append(cmd_ret)
- return all(cmd.successful for cmd in pdf_commands)
-
def _post_build(self):
"""Internal post build to cleanup PDF output directory and leave only one .pdf file."""
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -719,18 +719,9 @@ def test_build_commands_executed(
cwd=mock.ANY,
bin_path=mock.ANY,
),
+ mock.call("cat", "latexmkrc", cwd=mock.ANY),
# NOTE: pdf `mv` commands and others are not here because the
# PDF resulting file is not found in the process (`_post_build`)
- mock.call(
- mock.ANY,
- "-c",
- '"import sys; import sphinx; sys.exit(0 if sphinx.version_info >= (1, 6, 1) else 1)"',
- bin_path=mock.ANY,
- cwd=mock.ANY,
- escape_command=False,
- shell=True,
- record=False,
- ),
mock.call(
mock.ANY,
"-m",
| Builder: remove PDF support for `Sphinx<=1.6.1`
We are running a command on each build to detect if `sphinx>=1.6.1` and decide whether or not to use `latexmk`.
https://github.com/readthedocs/readthedocs.org/blob/84a149a57ccd9c1b5cd47bb285263a273124a199/readthedocs/doc_builder/backends/sphinx.py#L310-L311
I quickly checked our Metabase query and found that we have 0 projects using such old versions, https://ethicalads.metabaseapp.com/question/250-projects-using-sphinx-timeserie, so I'm happy to remove this code and always use `latexmk`.
This will simplify the code and remove the execution of one extra command on _all the builds_.
| 2023-01-31T15:49:00 |
|
readthedocs/readthedocs.org | 10,086 | readthedocs__readthedocs.org-10086 | [
"9603"
] | 077ff9fd0ffbfce64fef65fa58741126d2d52388 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -160,6 +160,8 @@
# TODO: remove once we support different rtd config
# files per project.
"conf_py_path": f"/docs/{docset}/",
+ # Use to generate the Plausible "data-domain" attribute from the template
+ "plausible_domain": f"{os.environ.get('READTHEDOCS_PROJECT')}.readthedocs.io",
}
hoverxref_auto_ref = True
| Docs: use Plausible for analytics
We started using Plausible on our new website, we should eventually consolidate our own docs site there as well.
This might take a little bit of thought, but isn't a lot of work otherwise. Is there anything we care to preserve from GA or replicate from GA before migrating?
| Would it be sufficient to simply fetch and archive a few statistical reports that we would want to refer back to? I'm thinking specifically about metrics we can use to assess how things are developing:
* Total site usage
* Individual page hits
* Geographical region
* Browser
* Search terms
* Top entry page / exit page
fin.
I'm bumping this up for the next sprint. We have been talking about this lately. We want to add Plausible in:
- [x] blog
- [ ] our documentation
That way, we will have everything in one place (together with about.readthedocs.com) and also be able to define some goals.
It's already on the blog, but adding it to our docs shouldn't be hard. 👍 | 2023-02-28T21:57:57 |
|
readthedocs/readthedocs.org | 10,113 | readthedocs__readthedocs.org-10113 | [
"10015"
] | 96fe6694a85c6e2b5e840fe0600ca7c3ca256a3b | diff --git a/readthedocs/doc_builder/backends/sphinx.py b/readthedocs/doc_builder/backends/sphinx.py
--- a/readthedocs/doc_builder/backends/sphinx.py
+++ b/readthedocs/doc_builder/backends/sphinx.py
@@ -26,8 +26,6 @@
from readthedocs.projects.utils import safe_write
from ..base import BaseBuilder
-from ..constants import PDF_RE
-from ..environments import BuildCommand, DockerBuildCommand
from ..exceptions import BuildUserError
from ..signals import finalize_sphinx_context_data
@@ -488,30 +486,6 @@ def _post_build(self):
)
-class LatexBuildCommand(BuildCommand):
-
- """Ignore LaTeX exit code if there was file output."""
-
- def run(self):
- super().run()
- # Force LaTeX exit code to be a little more optimistic. If LaTeX
- # reports an output file, let's just assume we're fine.
- if PDF_RE.search(self.output):
- self.exit_code = 0
-
-
-class DockerLatexBuildCommand(DockerBuildCommand):
-
- """Ignore LaTeX exit code if there was file output."""
-
- def run(self):
- super().run()
- # Force LaTeX exit code to be a little more optimistic. If LaTeX
- # reports an output file, let's just assume we're fine.
- if PDF_RE.search(self.output):
- self.exit_code = 0
-
-
class PdfBuilder(BaseSphinx):
"""Builder to generate PDF documentation."""
@@ -589,11 +563,6 @@ def _build_latexmk(self, cwd):
cwd=self.absolute_host_output_dir,
)
- if self.build_env.command_class == DockerBuildCommand:
- latex_class = DockerLatexBuildCommand
- else:
- latex_class = LatexBuildCommand
-
cmd = [
'latexmk',
'-r',
@@ -610,16 +579,18 @@ def _build_latexmk(self, cwd):
'-interaction=nonstopmode',
]
- cmd_ret = self.build_env.run_command_class(
- cls=latex_class,
- cmd=cmd,
- warn_only=True,
- cwd=self.absolute_host_output_dir,
- )
-
- self.pdf_file_name = f'{self.project.slug}.pdf'
+ try:
+ cmd_ret = self.run(
+ *cmd,
+ cwd=self.absolute_host_output_dir,
+ )
+ self.pdf_file_name = f"{self.project.slug}.pdf"
+ return cmd_ret.successful
- return cmd_ret.successful
+ # Catch the exception and re-raise it with a specific message
+ except BuildUserError:
+ raise BuildUserError(BuildUserError.PDF_COMMAND_FAILED)
+ return False
def _post_build(self):
"""Internal post build to cleanup PDF output directory and leave only one .pdf file."""
diff --git a/readthedocs/doc_builder/constants.py b/readthedocs/doc_builder/constants.py
--- a/readthedocs/doc_builder/constants.py
+++ b/readthedocs/doc_builder/constants.py
@@ -1,15 +1,12 @@
"""Doc build constants."""
-import re
import structlog
from django.conf import settings
log = structlog.get_logger(__name__)
-PDF_RE = re.compile('Output written on (.*?)')
-
# Docker
DOCKER_SOCKET = settings.DOCKER_SOCKET
DOCKER_VERSION = settings.DOCKER_VERSION
diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py
--- a/readthedocs/doc_builder/exceptions.py
+++ b/readthedocs/doc_builder/exceptions.py
@@ -52,6 +52,13 @@ class BuildUserError(BuildBaseException):
"and it is not currently supported. "
'Please, remove all the files but the "{artifact_type}" your want to upload.'
)
+ PDF_COMMAND_FAILED = gettext_noop(
+ "PDF generation failed. "
+ "The build log below contains information on what errors caused the failure."
+ "Our code has recently changed to fail the entire build on PDF errors, "
+ "where we used to pass the build when a PDF was created."
+ "Please contact us if you need help understanding this error."
+ )
class BuildUserSkip(BuildUserError):
| diff --git a/readthedocs/projects/tests/mockers.py b/readthedocs/projects/tests/mockers.py
--- a/readthedocs/projects/tests/mockers.py
+++ b/readthedocs/projects/tests/mockers.py
@@ -58,13 +58,6 @@ def _mock_artifact_builders(self):
"project-slug.pdf",
)
- self.patches['builder.pdf.LatexBuildCommand.run'] = mock.patch(
- 'readthedocs.doc_builder.backends.sphinx.LatexBuildCommand.run',
- return_value=mock.MagicMock(output='stdout', successful=True)
- )
- # self.patches['builder.pdf.LatexBuildCommand.output'] = mock.patch(
- # 'readthedocs.doc_builder.backends.sphinx.LatexBuildCommand.output',
- # )
self.patches['builder.pdf.glob'] = mock.patch(
'readthedocs.doc_builder.backends.sphinx.glob',
return_value=['output.file'],
diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -723,6 +723,18 @@ def test_build_commands_executed(
bin_path=mock.ANY,
),
mock.call("cat", "latexmkrc", cwd=mock.ANY),
+ mock.call(
+ "latexmk",
+ "-r",
+ "latexmkrc",
+ "-pdf",
+ "-f",
+ "-dvi-",
+ "-ps-",
+ "-jobname=project",
+ "-interaction=nonstopmode",
+ cwd=mock.ANY,
+ ),
# NOTE: pdf `mv` commands and others are not here because the
# PDF resulting file is not found in the process (`_post_build`)
mock.call(
@@ -794,6 +806,14 @@ def test_build_commands_executed(
record=False,
demux=True,
),
+ mock.call(
+ "test",
+ "-x",
+ "_build/html",
+ record=False,
+ demux=True,
+ cwd=mock.ANY,
+ ),
]
)
| Document build failed with the error "Build output directory for format "pdf" contains multiple files and it is not currently supported."
I built my project documentation successfully before, but recently it failed with the error message "Build output directory for format "pdf" contains multiple files and it is not currently supported. Please, remove all the files but the "pdf" your want to upload. "
<img width="789" alt="image" src="https://user-images.githubusercontent.com/100339350/218298781-a0f0fa28-e508-4bbb-9356-19eec81a5705.png">
It seems like the error occurred in this step:
<img width="762" alt="image" src="https://user-images.githubusercontent.com/100339350/218298834-ee986ae5-a7fa-4e1e-8cff-6dce75f50639.png">
I didn't find any way to solve this problem. Does anyone know about this problem? Thank u. T-T
| Hi @TTYee! Can you provide the information asked in the template when you filled the form? (project URL, build URL, etc)
Cython has the same problem.
* Read the Docs project URL: https://readthedocs.org/projects/cython/
* Build URL (the latest successful build): https://readthedocs.org/projects/cython/builds/19395633/
* Build URL (the first failed build): https://readthedocs.org/projects/cython/builds/19423261/
@GalaxySnail hi! Are you able to build the PDF locally using latexmk?
> hi! Are you able to build the PDF locally using latexmk?
I haven't tried it. I'm running diff on these 2 logs:
``` bash
diff -Nu <(curl https://readthedocs.org/api/v2/build/19395633.txt) <(curl https://readthedocs.org/api/v2/build/19423261.txt) | head -n 20
```
``` diff
--- /proc/self/fd/11 2023-02-19 07:42:14.000000000 +0800
+++ /proc/self/fd/14 2023-02-19 07:42:14.000000000 +0800
@@ -1,18 +1,18 @@
Read the Docs build information
-Build id: 19395633
+Build id: 19423261
Project: cython
Version: latest
Commit: b24286d35aaaf1318cd2bebb10e3e16d72965a5b
-Date: 2023-02-06T17:21:37.353864Z
+Date: 2023-02-08T17:40:15.490385Z
State: finished
-Success: True
+Success: False
-[rtd-command-info] start-time: 2023-02-06T17:21:38.926762Z, end-time: 2023-02-06T17:21:47.712146Z, duration: 8, exit-code: 0
+[rtd-command-info] start-time: 2023-02-08T17:40:17.041926Z, end-time: 2023-02-08T17:40:26.123477Z, duration: 9, exit-code: 0
git clone --no-single-branch --depth 50 https://github.com/cython/cython .
Cloning into '.'...
```
It looks like most of them are the same: latexmk was always failing, and the only meaningful difference is `Success: True` vs `Success: False`. Then I checked previous logs; document builds started succeeding about a year ago:
- failed: https://readthedocs.org/projects/cython/builds/15999585/
- successful: https://readthedocs.org/projects/cython/builds/16031152/
It seems that something changed in readthedocs.org so that a failed latexmk didn't cause the build to fail. However, that seems to have been fixed recently, so the build has started failing again.
Can you try building the PDF locally?
I found this error is confusing our users in different ways. The real solution here would be to **fail the build immediately when any of the PDF commands fails** (https://github.com/readthedocs/readthedocs.org/issues/7884). Right now, we just keep going and then users receive these unexpected and confusing messages.
These are other related issues as well:
* https://github.com/readthedocs/readthedocs.org/issues/8359
* https://github.com/readthedocs/readthedocs.org/issues/3906
* https://github.com/readthedocs/readthedocs.org/issues/7884
* https://github.com/readthedocs/readthedocs.org/issues/8615
I think it would be good to standardize this behavior to match the rest of the commands.
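The patch in this record moves in that direction: the `latexmk` command goes through a plain `run()` call, and a failure is re-raised with a PDF-specific message instead of being swallowed. A minimal sketch of that control flow (names simplified from the diff, and assuming the runner raises `BuildUserError` on a non-zero exit code, as the diff's `try/except` implies):
```python
from readthedocs.doc_builder.exceptions import BuildUserError

def build_pdf(run, latexmk_cmd, output_dir):
    """Fail the whole build when the PDF command fails, instead of only warning."""
    try:
        cmd_ret = run(*latexmk_cmd, cwd=output_dir)  # raises BuildUserError on failure
        return cmd_ret.successful
    except BuildUserError:
        # Surface a PDF-specific, user-facing error instead of passing silently
        raise BuildUserError(BuildUserError.PDF_COMMAND_FAILED)
```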
@GalaxySnail Hi. I've found the PDF output is not necessary for my project (I only need HTML output for now). So I turned off the "enable PDF build" option (which is set by default) on the "advanced setup" page of our project, and then the project could be built successfully again. I mean, if PDF output is not necessary for you either, you can try this method.

> Can you try building the PDF locally?
Yes. It reproduces locally, but I don't know why.
``` shell
$ LC_ALL=C python -m sphinx -T -E -b latex -d _build/doctrees -D language=en . _readthedocs/pdf
```
<details>
<summary>expand output</summary>
```
Running Sphinx v6.1.3
making output directory... done
loading intersphinx inventory from https://docs.python.org/3/objects.inv...
building [mo]: targets for 0 po files that are out of date
writing output...
building [latex]: all documents
updating environment: [new config] 58 added, 0 changed, 0 removed
reading sources... [100%] src/userguide/wrapping_CPlusPlus
looking for now-outdated files... none found
pickling environment... done
checking consistency... D:\Documents\git\cython\docs\src\reference\interfacing_with_other_code.rst: WARNING: document isn't included in any toctree
D:\Documents\git\cython\docs\src\reference\limitations.rst: WARNING: document isn't included in any toctree
D:\Documents\git\cython\docs\src\reference\special_mention.rst: WARNING: document isn't included in any toctree
done
processing reference.tex... src/reference/index src/reference/compilation src/reference/special_methods_table
resolving references...
done
writing... done
processing tutorial.tex... src/tutorial/index src/tutorial/cython_tutorial src/tutorial/external src/tutorial/clibraries src/tutorial/cdef_classes src/tutorial/pxd_files src/tutorial/caveats src/tutorial/profiling_tutorial src/tutorial/strings src/tutorial/memory_allocation src/tutorial/embedding src/tutorial/pure src/tutorial/numpy src/tutorial/array src/tutorial/parallelization src/tutorial/readings src/tutorial/related_work src/tutorial/appendix
resolving references...
D:\Documents\git\cython\docs\src\tutorial\clibraries.rst:765: WARNING: Unparseable C cross-reference: 'void*'
Invalid C declaration: Expected identifier in nested name, got keyword: void [error at 4]
void*
----^
D:\Documents\git\cython\docs\src\tutorial\clibraries.rst:839: WARNING: Unparseable C cross-reference: 'void*'
Invalid C declaration: Expected identifier in nested name, got keyword: void [error at 4]
void*
----^
D:\Documents\git\cython\docs\src\tutorial\clibraries.rst:839: WARNING: Unparseable C cross-reference: 'void*'
Invalid C declaration: Expected identifier in nested name, got keyword: void [error at 4]
void*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:167: WARNING: Unparseable C cross-reference: 'char*'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:172: WARNING: Unparseable C cross-reference: 'char*'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:326: WARNING: Unparseable C cross-reference: 'char*'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:350: WARNING: Unparseable C cross-reference: 'std::string'
Invalid C declaration: Expected end of definition. [error at 3]
std::string
---^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:479: WARNING: Unparseable C cross-reference: 'char'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:493: WARNING: Unparseable C cross-reference: 'char'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:493: WARNING: Unparseable C cross-reference: 'char'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:511: WARNING: Unparseable C cross-reference: 'char'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:511: WARNING: Unparseable C cross-reference: 'unsigned char'
Invalid C declaration: Expected identifier in nested name, got keyword: unsigned [error at 8]
unsigned char
--------^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:535: WARNING: Unparseable C cross-reference: 'long'
Invalid C declaration: Expected identifier in nested name, got keyword: long [error at 4]
long
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:535: WARNING: Unparseable C cross-reference: 'unsigned long'
Invalid C declaration: Expected identifier in nested name, got keyword: unsigned [error at 8]
unsigned long
--------^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:535: WARNING: Unparseable C cross-reference: 'int'
Invalid C declaration: Expected identifier in nested name, got keyword: int [error at 3]
int
---^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:616: WARNING: Unparseable C cross-reference: 'char*'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:653: WARNING: Unparseable C cross-reference: 'wchar_t*'
Invalid C declaration: Expected end of definition. [error at 7]
wchar_t*
-------^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:663: WARNING: Unparseable C cross-reference: 'Py_UNICODE*'
Invalid C declaration: Expected end of definition. [error at 10]
Py_UNICODE*
----------^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:663: WARNING: Unparseable C cross-reference: 'char*'
Invalid C declaration: Expected identifier in nested name, got keyword: char [error at 4]
char*
----^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:669: WARNING: Unparseable C cross-reference: 'Py_UNICODE*'
Invalid C declaration: Expected end of definition. [error at 10]
Py_UNICODE*
----------^
D:\Documents\git\cython\docs\src\tutorial\strings.rst:689: WARNING: Unparseable C cross-reference: 'Py_UNICODE*'
Invalid C declaration: Expected end of definition. [error at 10]
Py_UNICODE*
----------^
D:\Documents\git\cython\docs\src\tutorial\memory_allocation.rst:25: WARNING: term not in glossary: 'Heap allocation'
done
writing... done
copying images... [100%] src/tutorial/python_division.png
copying TeX support files... copying TeX support files...
done
build succeeded, 25 warnings.
The LaTeX files are in _readthedocs\pdf.
```
</details>
``` shell
$ cd _readthedocs/pdf
$ cat latexmkrc
```
``` perl
$latex = 'latex ' . $ENV{'LATEXOPTS'} . ' %O %S';
$pdflatex = 'pdflatex ' . $ENV{'LATEXOPTS'} . ' %O %S';
$lualatex = 'lualatex ' . $ENV{'LATEXOPTS'} . ' %O %S';
$xelatex = 'xelatex --no-pdf ' . $ENV{'LATEXOPTS'} . ' %O %S';
$makeindex = 'makeindex -s python.ist %O -o %D %S';
add_cus_dep( "glo", "gls", 0, "makeglo" );
sub makeglo {
return system( "makeindex -s gglo.ist -o '$_[0].gls' '$_[0].glo'" );
}
```
``` shell
$ latexmk -r latexmkrc -pdf -f -dvi- -ps- -jobname=cython -interaction=nonstopmode
```
```
Win CP console initial and current in/out Win: (936, 936), (936, 936)
Coding system for system and terminal: 'CP936'
---
Use of uninitialized value in concatenation (.) or string at (eval 10) line 1.
Use of uninitialized value in concatenation (.) or string at (eval 10) line 2.
Use of uninitialized value in concatenation (.) or string at (eval 10) line 3.
Use of uninitialized value in concatenation (.) or string at (eval 10) line 4.
Latexmk: A user -r option asked me to process an rc file an extra time.
Name of file = 'latexmkrc'
Abs. path = 'D:\Documents\git\cython\docs\_readthedocs\pdf\latexmkrc'
I'll not process it
Rc files read:
latexmkrc
Latexmk: This is Latexmk, John Collins, 17 Mar. 2022. Version 4.77, version: 4.77.
Latexmk: Need to specify at most one filename if jobname specified without a %A,
but 2 were found (after defaults and wildcarding).
Use
latexmk -help
to get usage information
C:\msys2\ucrt64\bin\runscript.tlu:915: command failed with exit code 10:
perl.exe c:\msys2\ucrt64\share\texmf-dist\scripts\latexmk\latexmk.pl -r latexmkrc -pdf -f -dvi- -ps- -jobname=cython -interaction=nonstopmode
```
> I found this error is confusing our users in different ways. The real solution here would be to **fail the build immediately when any of the PDF commands failed** (#7884). Right now, we are just moving forward and then the users are receiving this unexpected and confusing messages.
> Hi. I've found the PDF output is not necessary for my project (I only need HTML output for now). So I turned off the "enable PDF build" option (which is set by default) on the "advanced setup" page of our project, and then the project could be built successfully again. I mean, if PDF output is not necessary for you either, you can try this method.
Thanks for the explanation. As a workaround, Cython has decided to stop building PDF docs. | 2023-03-06T19:21:08 |
readthedocs/readthedocs.org | 10,115 | readthedocs__readthedocs.org-10115 | [
"8359"
] | 96fe6694a85c6e2b5e840fe0600ca7c3ca256a3b | diff --git a/readthedocs/projects/migrations/0098_pdf_epub_opt_in.py b/readthedocs/projects/migrations/0098_pdf_epub_opt_in.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/projects/migrations/0098_pdf_epub_opt_in.py
@@ -0,0 +1,49 @@
+# Generated by Django 3.2.18 on 2023-03-06 20:08
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("projects", "0097_add_http_header"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="historicalproject",
+ name="enable_epub_build",
+ field=models.BooleanField(
+ default=False,
+ help_text="Create a EPUB version of your documentation with each build.",
+ verbose_name="Enable EPUB build",
+ ),
+ ),
+ migrations.AlterField(
+ model_name="historicalproject",
+ name="enable_pdf_build",
+ field=models.BooleanField(
+ default=False,
+ help_text="Create a PDF version of your documentation with each build.",
+ verbose_name="Enable PDF build",
+ ),
+ ),
+ migrations.AlterField(
+ model_name="project",
+ name="enable_epub_build",
+ field=models.BooleanField(
+ default=False,
+ help_text="Create a EPUB version of your documentation with each build.",
+ verbose_name="Enable EPUB build",
+ ),
+ ),
+ migrations.AlterField(
+ model_name="project",
+ name="enable_pdf_build",
+ field=models.BooleanField(
+ default=False,
+ help_text="Create a PDF version of your documentation with each build.",
+ verbose_name="Enable PDF build",
+ ),
+ ),
+ ]
diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -324,14 +324,14 @@ class Project(models.Model):
# Sphinx specific build options.
enable_epub_build = models.BooleanField(
_('Enable EPUB build'),
- default=True,
+ default=False,
help_text=_(
'Create a EPUB version of your documentation with each build.',
),
)
enable_pdf_build = models.BooleanField(
_('Enable PDF build'),
- default=True,
+ default=False,
help_text=_(
'Create a PDF version of your documentation with each build.',
),
| Make PDF and EPUB opt-in, rather than opt-out?
At the moment, every newly created project on Read the Docs has PDF and EPUB builds enabled in the UI:

If I understand correctly, this can be overridden by:
1. Unchecking those boxes in the UI, or
2. Using a `.readthedocs.yaml` file, where the default for `formats` is `[]` https://docs.readthedocs.io/en/stable/config-file/v2.html#formats
However, building PDFs is quite costly and error-prone, and it is unclear whether a majority of our users actually use them. On a related note, PDF errors are now passing silently (#7884).
Given that the config file (2) in itself is opt-in, I wonder if we should make PDF and EPUB off by default on the UI.
Some questions that might help making this decision, although I don't think we should hard block the decision on those:
- What percentage of active\* projects use a config file on their default version?
- What percentage of active\* projects have zero downloads on their PDFs and EPUBs?
(*active = with X builds in the past Y months? with NNNN visits in the past Y months?)
Thoughts @readthedocs/core ?
| We can look at Google Analytics for an idea of the number of PDF downloads. We track some events on the flyout menu, so here are some numbers (for the month of June):
* 855,989 pdf download click events
* 271,383 htmlzip download click events
* 263,023 epub download click events
* Only a couple html or zip download click events
This doesn't necessarily include *all* of the downloads, because you can download outside the flyout menu. It is probably the most likely download point, though.
So, PDF download events are more or less 1.5 % of the pageviews. And I bet it's not evenly distributed, surely there are a handful of projects that concentrate most of the downloads.
In any case, it might sound like I'm advocating for removing them - I'm not, just to make it clear. But:
- If projects want PDFs, I think they should enable them, and
- Changing the UI defaults would make the UI consistent with the v2 config file
I'm on the fence here because I think the main benefit of the PDF is for readers without internet access. This is not always considered by the author of the documentation, and in those cases, building it by default is 👍🏼 --I see this as a "feature of Read the Docs" (all projects have a PDF), not as a "feature of this particular documentation".
On the other hand, there are problems (like the ones you mentioned) that make me think that the benefit for the readers causes problems to authors. So, if we could easily fix them, I'd keep it enabled by default.
Finally, I think consistency is best. Having them enabled by default in the UI and config file v1, but not in v2, sounds like bad UX to me. It seems our last decision here (config v2) was to disable them by default --so, even if I don't really like the outcome, we should probably be consistent and do the same for the other cases if possible.
> I'm on the fence here because I think the main benefit of the PDF is for readers without the internet. This is not always considered by the author of the documentation and in those cases, building it by default is 👍🏼
Yeah, I'm with you on this one.
> I see this as a "feature of Read the Docs" (all the projects has PDF), not as a "feature of this particular documentation"
I agree, but at the same time, silently failing if the PDF doesn't build (#7884) or not having PDFs for projects with v2 configuration (~this issue) is not helping the feature. We discussed this a lot in https://github.com/readthedocs/readthedocs.org/pull/8106 with @ericholscher - at the beginning I was a big defender of LaTeX, but since then I have realized that PDFs are more fragile than I thought.
In my view, if we want to give some love to PDFs on RTD, we should have a roadmap to:
1. Explore/promote/improve alternative PDF generation tools for Sphinx that do not depend on LaTeX, like https://github.com/rst2pdf/rst2pdf or https://github.com/brechtm/rinohtype
2. Disable silent PDF failures, or make them configurable as @humitos suggested in #7884
3. Have a "config v3" (or whatever we end up having by that time) that leaves PDFs enabled
But for now, I'm advocating for consistency.
The goal was to eventually move everyone to a config file, but that obviously hasn't happened. I'm 👍 on changing the default, since it will only apply to new projects and be consistent. It will also save us some build resources, and save users build time & concurrency -- and most users only use the HTML.
I'm also on the fence. I feel like this is a feature that we are really bad at promoting and developing, but should absolutely be focusing on more as it is a unique feature of RTD. But, having said that, making the option default off but also working on the feature or promoting the feature more can both happen simultaneously. So, I'm also :+1: on consistency here for now.
I was about to comment that rst2pdf is very unmaintained, but it looks like new maintainers finally added Python 3 support :100:
However, I don't know that I trust rst2pdf any more than I trust LaTeX translation with Sphinx, at least at the scale we need a solution to work. rst2pdf was unmaintained for a long time and isn't as mature as Sphinx, and we'll definitely hit similar edge cases with rst2pdf. It sounds like you're probably advocating for supporting both options. I would probably only be :+1: on adding a secondary builder type, but with the caveat that rst2pdf/whatever would have to solve a lot of the problems we have with Sphinx/LaTeX to be worth the cost. I don't think this will be the case, though.
I believe there is a user extension that can already be used to use rst2pdf instead of latex for pdf generation too. It might be worth exploring, and this might also lead to solving a long standing bug with RTD where we only expect one PDF file output.
Before we start changing tooling, we should look into our data and identify what the actual issues with PDF generation are. Core team sees a lot of PDF failures because it's part of our job to support/debug them. The average user's interaction with PDF builds is probably unnoticeable in many cases however.
For an absolute out of left field option, we could discuss developing ePUB into a nicer experience. Really just a nicer theme would be required here, and there is prior art here that we're just not using. ePUB could be a default enabled build type if the experience is good. It's not as portable as PDF, but the experience with a nice theme is more usable across a larger number of devices. I've also gathered feedback on alternative formats in the past and MOBI support (which is kindle support, and a derivative of ePUB) was the most popular by far.
I'm 👍 on moving forward turning this off by default. I just had another support request where a user's build was breaking on PDF, and they didn't even know/need that. I think it's brittle enough to turn it off by default, and will also save us resources and users build time.
A data point about the interest of PDFs: `pdf` is the fourth most searched term in our docs.

Update: rst2pdf seems to be almost ready for Sphinx 4 compatibility, which is nice https://github.com/rst2pdf/rst2pdf/pull/1020
Everything is good news! 📰 We now support building all the formats using the tooling you want (#9888). People can use `SimplePDF`, `rst2pdf`, and any other tool they want and still keep everything integrated in the same way as it currently is with LaTeX.
That said, I'd like to come back to this and prioritize:
- turn off building PDF and ePUB by default (because of the reasons mentioned in this thread)
- fail the build if _any of the default commands_ for building the documentation breaks (this includes `sphinx -b latex`, `latexmk`, etc) -- #7884
- communicate the error clearly to the user at the top, in the same way as we are doing with the other errors
I think this will avoid confusion for authors, but also for readers who may download a half-baked PDF that's broken. Readers who care about these other formats will ask authors to generate them. Considering that this is now more flexible and supports more tools, I'd say authors are more likely to accept this proposal and care about having _better quality_ extra formats. Everybody wins! 🥇
By the way, here is an example of using `rinohtype` to build a PDF on Read the Docs with a simple configuration: https://test-builds.readthedocs.io/en/pdf-rinohtype/
Given the number of support requests we have received in the last few weeks about failing PDFs (we now show a confusing error 😅), I'd say that many people didn't even know they had PDFs enabled. These PDFs have been failing forever, but the build was succeeding and the HTML was updated properly. So, they didn't even notice there was an issue with their PDF.
Because of this reason, I'm 👍🏼 on disabling these formats by default, starting with new projects. | 2023-03-06T20:14:09 |
|
readthedocs/readthedocs.org | 10,119 | readthedocs__readthedocs.org-10119 | [
"10103"
] | 9890ff45b2749eda3955dde7ab67bb5dbadae3e9 | diff --git a/readthedocs/doc_builder/director.py b/readthedocs/doc_builder/director.py
--- a/readthedocs/doc_builder/director.py
+++ b/readthedocs/doc_builder/director.py
@@ -330,7 +330,7 @@ def run_build_job(self, job):
commands = getattr(self.data.config.build.jobs, job, [])
for command in commands:
- environment.run(*command.split(), escape_command=False, cwd=cwd)
+ environment.run(command, escape_command=False, cwd=cwd)
def run_build_commands(self):
reshim_commands = (
@@ -344,7 +344,7 @@ def run_build_commands(self):
cwd = self.data.project.checkout_path(self.data.version.slug)
environment = self.build_environment
for command in self.data.config.build.commands:
- environment.run(*command.split(), escape_command=False, cwd=cwd)
+ environment.run(command, escape_command=False, cwd=cwd)
# Execute ``asdf reshim python`` if the user is installing a
# package since the package may contain an executable
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -916,10 +916,11 @@ def test_build_jobs(self, load_yaml_config):
self.mocker.mocks["environment.run"].assert_has_calls(
[
- mock.call(
- "git", "fetch", "--unshallow", escape_command=False, cwd=mock.ANY
- ),
- mock.call("echo", "`date`", escape_command=False, cwd=mock.ANY),
+ # NOTE: when running commands from `build.jobs` or
+ # `build.commands` they are not split to allow multi-line
+ # scripts
+ mock.call("git fetch --unshallow", escape_command=False, cwd=mock.ANY),
+ mock.call("echo `date`", escape_command=False, cwd=mock.ANY),
],
any_order=True,
)
@@ -1035,10 +1036,11 @@ def test_build_commands(self, load_yaml_config):
"virtualenv",
"setuptools<58.3.0",
),
+ # NOTE: when running commands from `build.jobs` or
+ # `build.commands` they are not split to allow multi-line
+ # scripts
mock.call(
- "pip",
- "install",
- "pelican[markdown]",
+ "pip install pelican[markdown]",
escape_command=False,
cwd=mock.ANY,
),
@@ -1051,12 +1053,7 @@ def test_build_commands(self, load_yaml_config):
cwd=mock.ANY,
),
mock.call(
- "pelican",
- "--settings",
- "docs/pelicanconf.py",
- "--output",
- "$READTHEDOCS_OUTPUT/html/",
- "docs/",
+ "pelican --settings docs/pelicanconf.py --output $READTHEDOCS_OUTPUT/html/ docs/",
escape_command=False,
cwd=mock.ANY,
),
| Build: `build.jobs.post_build` produces mysterious shell syntax errors
## Details
* Read the Docs project URL: https://readthedocs.org/projects/mmref/
* Build URL (if applicable): https://readthedocs.org/projects/mmref/builds/19671469/ and https://readthedocs.org/projects/mmref/builds/19671433/ and most previous builds
* Read the Docs username (if applicable): https://readthedocs.org/profiles/rptb1/
## Expected Result
A post_build script `if test -n "A"; then echo "B"; fi` should not produce a syntax error.
A post_build script with `sh -c "foo"` in it should run the shell with the argument.
## Actual Result
`/bin/sh: 1: Syntax error: "then" unexpected`
`sh: 0: -c requires an argument`
I'm following the manual at ["Extend the build process"](https://docs.readthedocs.io/en/stable/build-customization.html#extend-the-build-process) and trying to insert a simple shell script at the post_build step.
A script that works locally (Ubuntu 22) is somehow mangled and causes syntax errors when inserted into the .readthedocs.yml at the post_build stage. I have tried many ways of writing the script and quoting it in YAML, to try to eliminate YAML as the culprit:
1. https://github.com/Ravenbrook/mps/pull/187/commits/4146715a1253a9ba7dcb8e528997545d52bb7ab2
2. https://github.com/Ravenbrook/mps/pull/187/commits/9fbc6224dbe1b645568fd32a938c77f84ef67d3a
3. https://github.com/Ravenbrook/mps/pull/187/commits/2a642594a525a3cbcd0d9c10f686bf6b6e43307c
4. https://github.com/Ravenbrook/mps/pull/187/commits/7fdbbd3ff01544484ba5a518b46c96dc9c98f1f3
5. https://github.com/Ravenbrook/mps/pull/187/commits/82f0d180a1bb0a7909d94eee22ab4a7b71a7dea3
6. https://github.com/Ravenbrook/mps/pull/187/commits/044e790f24217ad709dbc8ced9c35febaf02f0e9
7. https://github.com/Ravenbrook/mps/pull/187/commits/4a07b8418a4e5b1036dd50f13552098308972fb2
8. https://github.com/Ravenbrook/mps/pull/187/commits/1106c9cd066c9638ccf2f7772083384bb526673f
The [last one](https://github.com/Ravenbrook/mps/pull/187/commits/1106c9cd066c9638ccf2f7772083384bb526673f) is especially mysterious.
Is there some sort of mangling of the script going on, or am I just going blind?
None of these scripts produce syntax errors when I feed them to /bin/sh on my local Ubuntu. I have written similar scripts for multiple CI systems without coming across this problem before.
Thanks.
| A *literal copy of your own example* produces a syntax error.
I copied the text of the script from https://docs.readthedocs.io/en/stable/build-customization.html#id3 exactly in https://github.com/Ravenbrook/mps/pull/187/commits/b08bcf32bcd8d0e38e3e4bcc474820c16eda53e6 and it causes the syntax error seen at https://readthedocs.org/projects/mmref/builds/19671564/
Thanks for reporting this issue. We have been dealing with this already at https://github.com/pypi/warehouse/pull/12953 and I wasn't able to find the cause of it at that time. We will need to dig deeper into the code to understand what's going on here.
> We will need to dig deeper into the code to understand what's going on here.
This split looks highly suspicious. https://github.com/readthedocs/readthedocs.org/blob/f4215c5516eaa07c15afe49abdf1bb7a1a34655a/readthedocs/doc_builder/director.py#L333
From a 10 minute scan of your code, it looks to me like there might be a confusion between executing a single command and executing a script.
For a single command, you want to split argv and do something like `run(['ls', '-l'])`, because you're basically doing something like a call to [exec(3)](https://linux.die.net/man/3/exec) and spawning a single process.
But a *script* is a single string that you want to pass *complete* to a shell, like `run(['/bin/sh', '-c', SCRIPT])` (or similar). Splitting the script makes no sense at all.
```
>>> post_build = """
... if test -n "${MMREF}"; then \
... mv "${READTHEDOCS_OUTPUT}/html" "${READTHEDOCS_OUTPUT}/mmref.in" && \
... python manual/make-mmref.py "${READTHEDOCS_OUTPUT}/mmref.in" "${READTHEDOCS_OUTPUT}/html" \
... fi
... """
>>> post_build.split()
['if', 'test', '-n', '"${MMREF}";', 'then', 'mv', '"${READTHEDOCS_OUTPUT}/html"', '"${READTHEDOCS_OUTPUT}/mmref.in"', '&&', 'python', 'manual/make-mmref.py', '"${READTHEDOCS_OUTPUT}/mmref.in"', '"${READTHEDOCS_OUTPUT}/html"', 'fi']
```
Again, I am in no way sure this is what's wrong, but your splits don't look right to me.
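To make the suspicion concrete: the `command.split()` call linked above splits on *all* whitespace, and the builder later joins the parts back with single spaces before wrapping them in `/bin/sh -c '...'`, so any newlines that were separating shell statements disappear. A small sketch of that round trip in plain Python:
```python
>>> script = """
... if test -n "$READTHEDOCS"
... then
...     echo "building on Read the Docs"
... fi
... """
>>> " ".join(script.split())  # the newlines between shell statements are gone
'if test -n "$READTHEDOCS" then echo "building on Read the Docs" fi'
```
The collapsed one-liner is no longer valid shell (nothing separates the condition from `then`), which would explain the kind of syntax errors reported above; the fix in the patch passes each `build.jobs` command through as a single string instead of splitting it.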
@rptb1 I'm curious if it works if you put the script in a file, and just run that from the post-build step? Not ideal, but I don't think our intention with this feature was having full-length scripts here, but rather having commands or scripts that could be run. We can definitely change that design, but I'm curious if it works that way.
That said, if our docs examples aren't working, we should probably fix them :)
That might be a workaround for now, thanks.
The intention is clear from the documentation: currently the documented examples don't work.
You'll need to carefully document the subset of shell language you *do* support. Why not just pass everything to the shell? Is there a reason not to? | 2023-03-07T15:50:33 |
readthedocs/readthedocs.org | 10,133 | readthedocs__readthedocs.org-10133 | [
"10103"
] | dd07a3e9d467d9fd0047f6faf40d08fe0afed019 | diff --git a/readthedocs/doc_builder/environments.py b/readthedocs/doc_builder/environments.py
--- a/readthedocs/doc_builder/environments.py
+++ b/readthedocs/doc_builder/environments.py
@@ -14,7 +14,7 @@
from docker.errors import APIError as DockerAPIError
from docker.errors import DockerException
from docker.errors import NotFound as DockerNotFoundError
-from requests.exceptions import ConnectionError, ReadTimeout
+from requests.exceptions import ConnectionError, ReadTimeout # noqa
from requests_toolbelt.multipart.encoder import MultipartEncoder
from readthedocs.api.v2.client import api as api_v2
@@ -73,7 +73,7 @@ def __init__(
bin_path=None,
record_as_success=False,
demux=False,
- **kwargs,
+ **kwargs, # pylint: disable=unused-argument
):
self.command = command
self.shell = shell
@@ -252,8 +252,8 @@ def save(self):
{key: str(value) for key, value in data.items()}
)
resource = api_v2.command
- resp = resource._store['session'].post(
- resource._store['base_url'] + '/',
+ resp = resource._store["session"].post( # pylint: disable=protected-access
+ resource._store["base_url"] + "/", # pylint: disable=protected-access
data=encoder,
headers={
'Content-Type': encoder.content_type,
@@ -301,11 +301,40 @@ def run(self):
self.start_time = datetime.utcnow()
client = self.build_env.get_client()
+
+ # Create a copy of the environment to update PATH variable
+ environment = self._environment.copy()
+ # Default PATH variable
+ # This default comes from our Docker image:
+ #
+ # $ docker run --user docs -it --rm readthedocs/build:ubuntu-22.04 /bin/bash
+ # docs@bfe702e31cdd:~$ echo $PATH
+ # /home/docs/.asdf/shims:/home/docs/.asdf/bin
+ # :/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ # docs@bfe702e31cdd:~$
+ asdf_paths = (
+ f"/home/{settings.RTD_DOCKER_USER}/.asdf/shims"
+ f":/home/{settings.RTD_DOCKER_USER}/.asdf/bin"
+ )
+ if settings.RTD_DOCKER_COMPOSE:
+ asdf_paths += ":/root/.asdf/shims:/root/.asdf/bin"
+
+ default_paths = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ environment["PATH"] = f"{asdf_paths}:{default_paths}"
+
+ # Prepend the BIN_PATH if it's defined
+ if self.bin_path:
+ original_path = environment.get("PATH")
+ escaped_bin_path = self._escape_command(self.bin_path)
+ environment["PATH"] = escaped_bin_path
+ if original_path:
+ environment["PATH"] = f"{escaped_bin_path}:{original_path}"
+
try:
exec_cmd = client.exec_create(
container=self.build_env.container_id,
cmd=self.get_wrapped_command(),
- environment=self._environment,
+ environment=environment,
user=self.user,
workdir=self.cwd,
stdout=True,
@@ -357,31 +386,18 @@ def get_wrapped_command(self):
"""
Wrap command in a shell and optionally escape special bash characters.
- In order to set the current working path inside a docker container, we
- need to wrap the command in a shell call manually.
-
Some characters will be interpreted as shell characters without
escaping, such as: ``pip install requests<0.8``. When passing
``escape_command=True`` in the init method this escapes a good majority
of those characters.
"""
- prefix = ''
- if self.bin_path:
- bin_path = self._escape_command(self.bin_path)
- prefix += f'PATH={bin_path}:$PATH '
-
command = (
' '.join(
self._escape_command(part) if self.escape_command else part
for part in self.command
)
)
- return (
- "/bin/sh -c '{prefix}{cmd}'".format(
- prefix=prefix,
- cmd=command,
- )
- )
+ return f"/bin/bash -c '{command}'"
def _escape_command(self, cmd):
r"""Escape the command by prefixing suspicious chars with `\`."""
@@ -524,14 +540,14 @@ class BuildEnvironment(BaseEnvironment):
"""
def __init__(
- self,
- project=None,
- version=None,
- build=None,
- config=None,
- environment=None,
- record=True,
- **kwargs,
+ self,
+ project=None,
+ version=None,
+ build=None,
+ config=None,
+ environment=None,
+ record=True,
+ **kwargs, # pylint: disable=unused-argument
):
super().__init__(project, environment)
self.version = version
@@ -557,7 +573,7 @@ def run(self, *cmd, **kwargs):
})
return super().run(*cmd, **kwargs)
- def run_command_class(self, *cmd, **kwargs): # pylint: disable=arguments-differ
+ def run_command_class(self, *cmd, **kwargs): # pylint: disable=signature-differs
kwargs.update({
'build_env': self,
})
| Build: `build.jobs.post_build` produces mysterious shell syntax errors
## Details
* Read the Docs project URL: https://readthedocs.org/projects/mmref/
* Build URL (if applicable): https://readthedocs.org/projects/mmref/builds/19671469/ and https://readthedocs.org/projects/mmref/builds/19671433/ and most previous builds
* Read the Docs username (if applicable): https://readthedocs.org/profiles/rptb1/
## Expected Result
A post_build script `if test -n "A"; then echo "B"; fi` should not produce a syntax error.
A post_build script with `sh -c "foo"` in it should run the shell with the argument.
## Actual Result
`/bin/sh: 1: Syntax error: "then" unexpected`
`sh: 0: -c requires an argument`
I'm following the manual at ["Extend the build process"](https://docs.readthedocs.io/en/stable/build-customization.html#extend-the-build-process) and trying to insert a simple shell script at the post_build step.
A script that works locally (Ubuntu 22) is somehow mangled and causes syntax errors when inserted into the .readthedocs.yml at the post_build stage. I have tried many ways of writing the script and quoting it in YAML, to try to eliminate YAML as the culprit:
1. https://github.com/Ravenbrook/mps/pull/187/commits/4146715a1253a9ba7dcb8e528997545d52bb7ab2
2. https://github.com/Ravenbrook/mps/pull/187/commits/9fbc6224dbe1b645568fd32a938c77f84ef67d3a
3. https://github.com/Ravenbrook/mps/pull/187/commits/2a642594a525a3cbcd0d9c10f686bf6b6e43307c
4. https://github.com/Ravenbrook/mps/pull/187/commits/7fdbbd3ff01544484ba5a518b46c96dc9c98f1f3
5. https://github.com/Ravenbrook/mps/pull/187/commits/82f0d180a1bb0a7909d94eee22ab4a7b71a7dea3
6. https://github.com/Ravenbrook/mps/pull/187/commits/044e790f24217ad709dbc8ced9c35febaf02f0e9
7. https://github.com/Ravenbrook/mps/pull/187/commits/4a07b8418a4e5b1036dd50f13552098308972fb2
8. https://github.com/Ravenbrook/mps/pull/187/commits/1106c9cd066c9638ccf2f7772083384bb526673f
The [last one](https://github.com/Ravenbrook/mps/pull/187/commits/1106c9cd066c9638ccf2f7772083384bb526673f) is especially mysterious.
Is there some sort of mangling of the script going on, or am I just going blind?
None of these scripts produce syntax errors when I feed them to /bin/sh on my local Ubuntu. I have written similar scripts for multiple CI systems without coming across this problem before.
Thanks.
| A *literal copy of your own example* produces a syntax error.
I copied the text of the script from https://docs.readthedocs.io/en/stable/build-customization.html#id3 exactly in https://github.com/Ravenbrook/mps/pull/187/commits/b08bcf32bcd8d0e38e3e4bcc474820c16eda53e6 and it causes the syntax error seen at https://readthedocs.org/projects/mmref/builds/19671564/
Thanks for reporting this issue. We have been dealing with this already at https://github.com/pypi/warehouse/pull/12953 and I wasn't able to find the cause of it at that time. We will need to dig deeper into the code to understand what's going on here.
> We will need to dig deeper into the code to understand what's going on here.
This split looks highly suspicious. https://github.com/readthedocs/readthedocs.org/blob/f4215c5516eaa07c15afe49abdf1bb7a1a34655a/readthedocs/doc_builder/director.py#L333
From a 10 minute scan of your code, it looks to me like there might be a confusion between executing a single command and executing a script.
For a single command, you want to split argv and do something like `run(['ls', '-l'])`, because you're basically making a call to [exec(3)](https://linux.die.net/man/3/exec) and spawning a single process.
But a *script* is a single string that you want to pass *complete* to a shell, like `run(['/bin/sh', '-c', SCRIPT])` (or similar). Splitting the script makes no sense at all.
```
>>> post_build = """
... if test -n "${MMREF}"; then \
... mv "${READTHEDOCS_OUTPUT}/html" "${READTHEDOCS_OUTPUT}/mmref.in" && \
... python manual/make-mmref.py "${READTHEDOCS_OUTPUT}/mmref.in" "${READTHEDOCS_OUTPUT}/html" \
... fi
... """
>>> post_build.split()
['if', 'test', '-n', '"${MMREF}";', 'then', 'mv', '"${READTHEDOCS_OUTPUT}/html"', '"${READTHEDOCS_OUTPUT}/mmref.in"', '&&', 'python', 'manual/make-mmref.py', '"${READTHEDOCS_OUTPUT}/mmref.in"', '"${READTHEDOCS_OUTPUT}/html"', 'fi']
```
Again, I am in no way sure this is what's wrong, but your splits don't look right to me.
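To make the distinction concrete, here is a minimal Python sketch using `subprocess` (illustrative only, not the Read the Docs code; the `echo` body is made up, and `${MMREF}` is taken from the example above):
```python
import subprocess

# The user's post_build step kept as one string (adapted from the example above).
script = """
if test -n "${MMREF}"; then
    echo "building mmref"
fi
"""

# Splitting into argv is only appropriate for one executable plus its arguments.
subprocess.run(["ls", "-l"], check=True)

# A shell script has to reach the shell unsplit, as the single argument to `-c`,
# so that `then`/`fi` are parsed as shell keywords rather than argv entries.
subprocess.run(["/bin/sh", "-c", script], check=True)
```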
@rptb1 I'm curious if it works if you put the script in a file, and just run that from the post-build step? Not ideal, but I don't think our intention with this feature was to support full-length scripts here, only commands or scripts that could be run. We can definitely change that design, but I'm curious if it works that way.
That said, if our docs examples aren't working, we should probably fix them :)
That might be a workaround for now, thanks.
The intention is clear from the documentation: currently the documented examples don't work.
You'll need to carefully document the subset of shell language you *do* support. Why not just pass everything to the shell? Is there a reason not to?
I opened a PR that removes the `.split()` on those commands and interprets the text as a whole. I did some small QA locally and it worked. I'd appreciate some review on that code since I'm not 100% sure that it won't cause other issues.
I'm re-opening this issue because the PR that was merged doesn't solve all the issues.
I'm still not sure what the best way to solve this problem is 🤷🏼. This case, for example, is not solved:

That exact same code works properly in my local terminal, even wrapping it in `/bin/sh -c '{cmd}'` as we are doing in our code. It seems we are doing some manipulation of the command that breaks it, or there is a problem when communicating with Docker.
This requires more research and testing. Suggestions and ideas welcome!
I think the main problem here is the `PATH=` environment variable prefix we are adding:
It works without the prefix:
```
▶ sh -c 'if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/main -- docs/ .readthedocs.yaml;
then
exit 183;
fi'
```
It *fails* with the prefix:
```
▶ sh -c 'PATH=/bin:$PATH if [ "$READTHEDOCS_VERSION_TYPE" = "external" ] && git diff --quiet origin/main -- docs/ .readthedocs.yaml;
then
exit 183;
fi'
sh: line 1: if: command not found
sh: -c: line 2: syntax error near unexpected token `then'
sh: -c: line 2: `then'
```
This code is at https://github.com/readthedocs/readthedocs.org/blob/dd07a3e9d467d9fd0047f6faf40d08fe0afed019/readthedocs/doc_builder/environments.py#L356-L384
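A minimal sketch of what seems to be going on (assuming POSIX shell parsing rules; illustrative only, not the Read the Docs code): a `VAR=value` prefix turns the whole line into a simple command, so `if` is read as a command name instead of a keyword. Terminating the assignment first, e.g. with `export ...;`, keeps the compound command intact:
```python
import subprocess

script = 'if [ "1" ]; then echo 1; fi'

# Fails: the assignment prefix makes the shell look for a command literally named "if".
broken = f"PATH=/usr/local/bin:$PATH {script}"

# Works: the assignment is its own statement, so `if` stays a shell keyword.
fixed = f"export PATH=/usr/local/bin:$PATH; {script}"

# Reproduces the "syntax error near unexpected token `then'" failure (non-zero exit).
subprocess.run(["/bin/sh", "-c", broken])

# Prints "1".
subprocess.run(["/bin/sh", "-c", fixed], check=True)
```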
Another simplified example that breaks because of the environment variable prefix:
```
▶ sh -c 'if [ "1" ]; then echo 1; fi'
1
▶ sh -c 'FOO=bar if [ "1" ]; then echo 1; fi'
sh: -c: line 1: syntax error near unexpected token `then'
sh: -c: line 1: `FOO=bar if [ "1" ]; then echo 1; fi'
``` | 2023-03-09T10:52:05 |
|
readthedocs/readthedocs.org | 10,166 | readthedocs__readthedocs.org-10166 | [
"4820"
] | 5dab68d4a62a6841d677d2b8439fbf61ad15051f | diff --git a/readthedocs/api/v2/serializers.py b/readthedocs/api/v2/serializers.py
--- a/readthedocs/api/v2/serializers.py
+++ b/readthedocs/api/v2/serializers.py
@@ -5,6 +5,7 @@
from rest_framework import serializers
from readthedocs.api.v2.utils import normalize_build_command
+from readthedocs.builds.constants import EXTERNAL
from readthedocs.builds.models import Build, BuildCommandResult, Version
from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
from readthedocs.projects.models import Domain, Project
@@ -124,11 +125,20 @@ class VersionAdminSerializer(VersionSerializer):
"""Version serializer that returns admin project data."""
project = ProjectAdminSerializer()
+ canonical_url = serializers.SerializerMethodField()
build_data = serializers.JSONField(required=False, write_only=True)
+ def get_canonical_url(self, obj):
+ return obj.project.get_docs_url(
+ lang_slug=obj.project.language,
+ version_slug=obj.slug,
+ external=obj.type == EXTERNAL,
+ )
+
class Meta(VersionSerializer.Meta):
fields = VersionSerializer.Meta.fields + [
"build_data",
+ "canonical_url",
]
diff --git a/readthedocs/builds/models.py b/readthedocs/builds/models.py
--- a/readthedocs/builds/models.py
+++ b/readthedocs/builds/models.py
@@ -601,7 +601,8 @@ class Meta:
proxy = True
def __init__(self, *args, **kwargs):
- self.project = APIProject(**kwargs.pop('project', {}))
+ self.project = APIProject(**kwargs.pop("project", {}))
+ self.canonical_url = kwargs.pop("canonical_url", None)
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
for key in ['resource_uri', 'absolute_url', 'downloads']:
diff --git a/readthedocs/doc_builder/director.py b/readthedocs/doc_builder/director.py
--- a/readthedocs/doc_builder/director.py
+++ b/readthedocs/doc_builder/director.py
@@ -615,6 +615,12 @@ def get_build_env_vars(self):
}
)
+ env.update(
+ {
+ "READTHEDOCS_CANONICAL_URL": self.data.version.canonical_url,
+ }
+ )
+
# Update environment from Project's specific environment variables,
# avoiding to expose private environment variables
# if the version is external (i.e. a PR build).
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -301,6 +301,11 @@ def test_get_env_vars(self, load_yaml_config, build_environment, config, externa
# Local and Circle are different values.
# We only check it's present, but not its value.
READTHEDOCS_VIRTUALENV_PATH=mock.ANY,
+ READTHEDOCS_CANONICAL_URL=self.project.get_docs_url(
+ lang_slug=self.project.language,
+ version_slug=self.version.slug,
+ external=external,
+ ),
)
if not external:
expected_build_env_vars["PRIVATE_TOKEN"] = "a1b2c3"
diff --git a/readthedocs/rtd_tests/tests/test_api.py b/readthedocs/rtd_tests/tests/test_api.py
--- a/readthedocs/rtd_tests/tests/test_api.py
+++ b/readthedocs/rtd_tests/tests/test_api.py
@@ -2442,6 +2442,7 @@ def test_get_version_by_id(self):
"built": False,
"id": 18,
"active": True,
+ "canonical_url": "http://readthedocs.org/docs/pip/en/0.8/",
"project": {
"analytics_code": None,
"analytics_disabled": False,
| Inject canonical tag on mkdocs as we do with Sphinx
In Sphinx, if a user has a custom domain, we set up the canonical tag for the user (we also have some docs for that feature https://docs.readthedocs.io/en/latest/canonical.html)
In Sphinx we do that here https://github.com/rtfd/readthedocs.org/blob/9b2b17c0fc603267b4b20f5923862c2db82602da/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L95-L95
On mkdocs we don't do that, but it's possible using the `site_url` setting https://www.mkdocs.org/user-guide/configuration/#site_url
We can do that here
https://github.com/rtfd/readthedocs.org/blob/9b2b17c0fc603267b4b20f5923862c2db82602da/readthedocs/doc_builder/backends/mkdocs.py#L107-L107
Ref https://github.com/rtfd/readthedocs.org/issues/3137
| @stsewd I would like to take this issue up.
I didn't find a way to get the canonical URL of the project; is it present in the keyword arguments that `append_conf` receives, i.e. `__`?
That comes from the project model, but this issue has the `Needed: design decision` label; we need to wait for it to be approved before writing some code.
Just adding that it looks like the `site_url` option has been there for a long time; there's nothing relevant in the changelog https://www.mkdocs.org/about/release-notes
> Just btw MKdocs just yesterday added support for Environment variables in version 1.2.0, all you have to do is make a variable with the final URL available for the build :)
https://www.mkdocs.org/about/release-notes/#support-added-for-environment-variables-in-the-configuration-file-1954
As mentioned by @astrojuanlu, we can use environment variables in `.mkdocs.yml` and that would be valuable in general to users.
However, then this needs to be fixed: https://github.com/readthedocs/readthedocs.org/issues/8529 | 2023-03-21T10:05:43 |
readthedocs/readthedocs.org | 10,168 | readthedocs__readthedocs.org-10168 | [
"9423"
] | 64ab38c162e402fd5ff4876c1715d8d1b24e7b3e | diff --git a/readthedocs/doc_builder/director.py b/readthedocs/doc_builder/director.py
--- a/readthedocs/doc_builder/director.py
+++ b/readthedocs/doc_builder/director.py
@@ -574,6 +574,12 @@ def get_rtd_env_vars(self):
"READTHEDOCS_OUTPUT": os.path.join(
self.data.project.checkout_path(self.data.version.slug), "_readthedocs/"
),
+ "READTHEDOCS_GIT_CLONE_URL": self.data.project.repo,
+ # TODO: we don't have access to the database from the builder.
+ # We need to find a way to expose HTML_URL here as well.
+ # "READTHEDOCS_GIT_HTML_URL": self.data.project.remote_repository.html_url,
+ "READTHEDOCS_GIT_IDENTIFIER": self.data.version.identifier,
+ "READTHEDOCS_GIT_COMMIT_HASH": self.data.build["commit"],
}
return env
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -279,6 +279,9 @@ def test_get_env_vars(self, load_yaml_config, build_environment, config, externa
"READTHEDOCS_OUTPUT": os.path.join(
self.project.checkout_path(self.version.slug), "_readthedocs/"
),
+ "READTHEDOCS_GIT_CLONE_URL": self.project.repo,
+ "READTHEDOCS_GIT_IDENTIFIER": self.version.identifier,
+ "READTHEDOCS_GIT_COMMIT_HASH": self.build.commit,
}
self._trigger_update_docs_task()
| Build: expose VCS-related environment variables
So... with `sphinx-basic-ng`, a bunch of the theme developers in the Sphinx ecosystem are trying to get a shared base theme that we all build upon.
Notably, this has reusable implementations of various design "components" (https://sphinx-basic-ng.readthedocs.io/en/latest/usage/components/). In a few of them[^1],
```py
html_theme_options = {
"source_repository": "https://github.com/pradyunsg/sphinx-basic-ng/", # you can skip the trailing /
"source_branch": "main",
"source_directory": "docs", # or "docs/"
}
```
It would be *great* if builds on Read the Docs could auto-populate these theme variables, when they're not provided by the user.
That would make the community-maintained themes that are using these components "just work" on Read the Docs, similar to how the Read the Docs theme works! Since this is designed to be a shared implementation across themes (Furo and Lutra already use it, there's efforts underway to get this for pydata-sphinx-theme and sphinx-book-theme as well), it's somewhat theme-agnostic and would be a nice user experience improvement!
Notably, it would allow themes to remove custom code for supporting VCS information conditionally on Read the Docs (Furo does this!).
[^1]: [edit-this-page](https://sphinx-basic-ng.readthedocs.io/en/latest/usage/components/edit-this-page/) and [view-this-page](https://sphinx-basic-ng.readthedocs.io/en/latest/usage/components/view-this-page/)
| FWIW, this is (in effect) a replacement for https://github.com/readthedocs/readthedocs.org/blob/main/docs/user/guides/edit-source-links-sphinx.rst .
If you'd prefer to provide a fully-formatted link instead, that's coming in https://github.com/pradyunsg/sphinx-basic-ng/pull/34 -- which has the relevant documentation updates too. :)
> It would be _great_ if builds on Read the Docs could auto-populate these theme variables, when they're not provided by the user.
Would it be useful if Read the Docs passed these as environment variables (e.g. `READTHEDOCS_SOURCE_REPOSITORY`, etc.) and it's the theme itself that defines them in the Sphinx context? We are moving away from defining things automagically behind the scenes for the users because it creates confusing scenarios.
> Would it be useful if Read the Docs passes these as environment variables (e.g. `READTHEDOCS_SOURCE_REPOSITORY`, etc) and it's the theme itself that defines them to the Sphinx context?
That works!
`READTHEDOCS_SOURCE_REPOSITORY` works for me, too, but I think it should be `READTHEDOCS_SOURCE_REPOSITORY_WEB_URL` since the source repository has other properties that we can also expose over time if requested? I think that `READTHEDOCS_SOURCE_REPOSITORY_URL` is ambiguous :) We can also trim `_SOURCE`.
* `READTHEDOCS_GIT_WEB_URL`: Indicates the root path of the repo website, i.e. https://github.com/org/repo
* `READTHEDOCS_GIT_PROVIDER`: "github"/"bitbucket"/"gitlab" - is that useful for you, @pradyunsg ?
* `READTHEDOCS_GIT_IDENTIFIER`: Populated with name of tag, branch or commit hash (only one of them and selected in that priority - if tag exists, uses tag, if branch uses branch otherwise commit hash)
* `READTHEDOCS_SOURCE_DIR`: If the Sphinx builder is active, we can put the directory here (note @humitos maybe good to call the new `READTHEDOCS_OUTPUT` `READTHEDOCS_OUTPUT_DIR` instead?)
I'm not against additional environment variables; those could be very handy in some situations. But I wanted to mention that the RTD build process already provides some (all?) of the mentioned information in `html_context`.
The source repository can be found by looking at `display_gitlab`, combined with `gitlab_user` and `gitlab_repo` (same for `bitbucket` and `github`).
I don't know if the actual branch name is available, and I'm not sure how reliable it would be anyway, but the commit is available in the field `commit`.
The source directory is available in `conf_py_path` (strictly speaking, `conf.py` doesn't have to be in the source directory, but normally it is).
I'm using all this information to create a source link at the bottom of each page in my `insipid` theme, see https://insipid-sphinx-theme.readthedocs.io/en/0.4.1/configuration.html#confval-html_show_sourcelink
@mgeier
> I'm not against additional environment variables, those could be very handy in some situations, but I wanted to mention that the RTD build process already provides some (all?) of the mentioned information in html_context.
We are moving away from built-in Sphinx-only features and trying to support other tools in a cleaner and more generic way. So, the additional environment variables would be useful for these other documentation tools as well (e.g. MkDocs, Docusaurus, etc).
I think the right way to keep supporting Sphinx-only features is by creating more Sphinx extensions rather than adding them into the Read the Docs build process itself. We have tested this already with some extensions and we are happy with the results, so we will probably keep moving in that direction.
BTW, thanks for pointing out where to get this information from with the current build process! 👍🏼
@benjaoming
I wouldn't use the word `GIT` in the variables; I think it just adds noise, and we currently support other VCSs as well. Yes, we want to deprecate them too 😄
These are my suggestions following your notes:
* `READTHEDOCS_REPOSITORY_URL` (GitHub URL, like `https://github.com/readthedocs/readthedocs.org/`)
* `READTHEDOCS_REPOSITORY_IDENTIFIER` (e.g. `main` or `v1.0.0` or `a1b2c3` --matching what's used in the `Version` object)
Thinking a little more about `READTHEDOCS_SOURCE_DIR`, what should its value be for different doctools? How will we get it correctly? What's the value for Docusaurus, or Gatsby, for example? I'm not sure we can commit ourselves to populating it with the right value. However, this is related to https://github.com/readthedocs/readthedocs.org/issues/9088
I'd move forward with the first two, which are the ones we already know, and leave the third one open so we can continue discussing and decide how to implement it.
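As a rough illustration only (not an implemented API), a theme user's `conf.py` could then consume whichever names end up shipping; the names below are the ones added in the patch at the top of this entry, and the exact names were still under discussion here:
```python
# conf.py -- sketch only; variable names may change.
import os

html_theme_options = {
    # Note: this is the *clone* URL, which may differ from the web URL a theme expects.
    "source_repository": os.environ.get("READTHEDOCS_GIT_CLONE_URL", ""),
    "source_branch": os.environ.get("READTHEDOCS_GIT_IDENTIFIER", "main"),
    "source_directory": "docs/",
}
```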
| 2023-03-21T12:11:56 |
readthedocs/readthedocs.org | 10,172 | readthedocs__readthedocs.org-10172 | [
"10171"
] | 64ab38c162e402fd5ff4876c1715d8d1b24e7b3e | diff --git a/readthedocs/doc_builder/environments.py b/readthedocs/doc_builder/environments.py
--- a/readthedocs/doc_builder/environments.py
+++ b/readthedocs/doc_builder/environments.py
@@ -312,15 +312,33 @@ def run(self):
# /home/docs/.asdf/shims:/home/docs/.asdf/bin
# :/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# docs@bfe702e31cdd:~$
- asdf_paths = (
- f"/home/{settings.RTD_DOCKER_USER}/.asdf/shims"
- f":/home/{settings.RTD_DOCKER_USER}/.asdf/bin"
- )
- if settings.RTD_DOCKER_COMPOSE:
- asdf_paths += ":/root/.asdf/shims:/root/.asdf/bin"
-
+ #
+ # On old Docker images we have different PATH:
+ #
+ # $ sudo docker run -it readthedocs/build:latest /bin/bash
+ # docs@656e38a30fa4:/$ echo $PATH
+ # /home/docs/.pyenv/shims:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/docs/.conda/bin:/home/docs/.pyenv/bin
+ # docs@656e38a30fa4:/$
default_paths = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- environment["PATH"] = f"{asdf_paths}:{default_paths}"
+ if self.build_env.container_image in (
+ "readthedocs/build:ubuntu-22.04",
+ "readthedocs/build:ubuntu-20.04",
+ ):
+ # Use ASDF path for newer images
+ python_paths = "/home/docs/.asdf/shims:/home/docs/.asdf/bin"
+ paths = f"{python_paths}:{default_paths}"
+
+ # On local development, we are using root user
+ if settings.RTD_DOCKER_COMPOSE:
+ paths = paths.replace("/home/docs/", "/root/")
+ else:
+ # Use PYENV for older images
+ paths = (
+ "/home/docs/.pyenv/shims:/home/docs/.cargo/bin"
+ f":{default_paths}:"
+ "/home/docs/.conda/bin:/home/docs/.pyenv/bin"
+ )
+ environment["PATH"] = paths
# Prepend the BIN_PATH if it's defined
if self.bin_path:
| Build failing with `/bin/bash: line 1: asdf: command not found`
## Details
* Read the Docs project URL: https://readthedocs.org/projects/exordium/
* Build URL (if applicable): https://readthedocs.org/projects/exordium/builds/19855052/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/apocalyptech/
## Expected Result
Build should succeed
## Actual Result
Build is failing with `/bin/bash: line 1: asdf: command not found` -- I've also tried re-building old versions which used to work fine, and they're failing with the same error. I don't specify `asdf` anywhere in my repo or RTD configs, so I'm not sure where that's coming from. Let me know if I can provide any more info!
| Ditto, was just opening the same issue! I'll drop the info here since I already typed all of it :smile:
## Details
* Read the Docs project URL: Multiple including https://readthedocs.org/projects/dash-user-docs/
* Build URL (if applicable):
* Failed: https://readthedocs.org/projects/dash-user-docs/builds/19855245/
* Successful: https://readthedocs.org/projects/dash-user-docs/builds/19852825/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/thephez/
## Expected Result
Project builds successfully as usual
## Actual Result
Builds failing today with this error:
```
/bin/bash: line 1: asdf: command not found
```
### Screenshots
Failed build:

Successful build of same commit earlier today:

Same :disappointed:
Failed: https://readthedocs.org/projects/crowsetta/builds/19854975/
Last successful: https://readthedocs.org/projects/crowsetta/builds/19844422/
(Hi @NickleDave !)
Similar but slightly different issue:

Failed:https://readthedocs.org/projects/pyomo/builds/19855262/
Last successful: https://readthedocs.org/projects/pyomo/builds/19854787/
| 2023-03-21T16:52:21 |
|
readthedocs/readthedocs.org | 10,216 | readthedocs__readthedocs.org-10216 | [
"10211"
] | 9cfb649bf319ac904c8eb3974d880015ac7f094d | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -1,11 +1,16 @@
"""Views for hosting features."""
+import packaging
import structlog
from django.conf import settings
from django.http import JsonResponse
from django.views import View
-from readthedocs.builds.constants import EXTERNAL
+from readthedocs.api.v3.serializers import (
+ BuildSerializer,
+ ProjectSerializer,
+ VersionSerializer,
+)
from readthedocs.core.mixins import CDNCacheControlMixin
from readthedocs.core.resolver import resolver
from readthedocs.core.unresolver import unresolver
@@ -13,7 +18,34 @@
log = structlog.get_logger(__name__) # noqa
+ADDONS_VERSIONS_SUPPORTED = (0, 1)
+
+
+class ClientError(Exception):
+ VERSION_NOT_CURRENTLY_SUPPORTED = (
+ "The version specified in 'X-RTD-Hosting-Integrations-Version'"
+ " is currently not supported"
+ )
+ VERSION_INVALID = "'X-RTD-Hosting-Integrations-Version' header version is invalid"
+ VERSION_HEADER_MISSING = (
+ "'X-RTD-Hosting-Integrations-Version' header attribute is required"
+ )
+
+
class ReadTheDocsConfigJson(CDNCacheControlMixin, View):
+
+ """
+ API response consumed by our JavaScript client.
+
+ The code for the JavaScript client lives at:
+ https://github.com/readthedocs/readthedocs-client/
+
+ Attributes:
+
+ url (required): absolute URL from where the request is performed
+ (e.g. ``window.location.href``)
+ """
+
def get(self, request):
url = request.GET.get("url")
@@ -23,37 +55,126 @@ def get(self, request):
status=400,
)
+ addons_version = request.headers.get("X-RTD-Hosting-Integrations-Version")
+ if not addons_version:
+ return JsonResponse(
+ {
+ "error": ClientError.VERSION_HEADER_MISSING,
+ },
+ status=400,
+ )
+ try:
+ addons_version = packaging.version.parse(addons_version)
+ if addons_version.major not in ADDONS_VERSIONS_SUPPORTED:
+ raise ClientError
+ except packaging.version.InvalidVersion:
+ return JsonResponse(
+ {
+ "error": ClientError.VERSION_INVALID,
+ },
+ status=400,
+ )
+ except ClientError:
+ return JsonResponse(
+ {"error": ClientError.VERSION_NOT_CURRENTLY_SUPPORTED},
+ status=400,
+ )
+
unresolved_domain = request.unresolved_domain
project = unresolved_domain.project
unresolved_url = unresolver.unresolve_url(url)
version = unresolved_url.version
+ filename = unresolved_url.filename
project.get_default_version()
build = version.builds.last()
- # TODO: define how it will be the exact JSON object returned here
- # NOTE: we could use the APIv3 serializers for some of these objects
- # if we want to keep consistency. However, those may require some
- # extra db calls that we probably want to avoid.
+ data = AddonsResponse().get(addons_version, project, version, build, filename)
+ return JsonResponse(data, json_dumps_params=dict(indent=4))
+
+
+class NoLinksMixin:
+
+ """Mixin to remove conflicting fields from serializers."""
+
+ FIELDS_TO_REMOVE = (
+ "_links",
+ "urls",
+ )
+
+ def __init__(self, *args, **kwargs):
+ super(NoLinksMixin, self).__init__(*args, **kwargs)
+
+ for field in self.FIELDS_TO_REMOVE:
+ if field in self.fields:
+ del self.fields[field]
+
+ if field in self.Meta.fields:
+ del self.Meta.fields[self.Meta.fields.index(field)]
+
+
+# NOTE: the following serializers are required only to remove some fields we
+# can't expose yet in this API endpoint because it running under El Proxito
+# which cannot resolve some dashboard URLs because they are not defined on El
+# Proxito.
+#
+# See https://github.com/readthedocs/readthedocs-ops/issues/1323
+class ProjectSerializerNoLinks(NoLinksMixin, ProjectSerializer):
+ pass
+
+
+class VersionSerializerNoLinks(NoLinksMixin, VersionSerializer):
+ pass
+
+
+class BuildSerializerNoLinks(NoLinksMixin, BuildSerializer):
+ pass
+
+
+class AddonsResponse:
+ def get(self, addons_version, project, version, build, filename):
+ """
+ Unique entry point to get the proper API response.
+
+ It will evaluate the ``addons_version`` passed and decide which is the
+ best JSON structure for that particular version.
+ """
+ if addons_version.major == 0:
+ return self._v0(project, version, build, filename)
+
+ if addons_version.major == 1:
+ return self._v1(project, version, build, filename)
+
+ def _v0(self, project, version, build, filename):
+ """
+ Initial JSON data structure consumed by the JavaScript client.
+
+ This response is definitely in *alpha* state currently and shouldn't be
+ used for anyone to customize their documentation or the integration
+ with the Read the Docs JavaScript client. It's under active development
+ and anything can change without notice.
+
+ It tries to follow some similarity with the APIv3 for already-known resources
+ (Project, Version, Build, etc).
+ """
+
data = {
"comment": (
"THIS RESPONSE IS IN ALPHA FOR TEST PURPOSES ONLY"
" AND IT'S GOING TO CHANGE COMPLETELY -- DO NOT USE IT!"
),
- "project": {
- "slug": project.slug,
- "language": project.language,
- "repository_url": project.repo,
- "programming_language": project.programming_language,
+ "projects": {
+ "current": ProjectSerializerNoLinks(project).data,
},
- "version": {
- "slug": version.slug,
- "external": version.type == EXTERNAL,
+ "versions": {
+ "current": VersionSerializerNoLinks(version).data,
},
- "build": {
- "id": build.pk,
+ "builds": {
+ "current": BuildSerializerNoLinks(build).data,
},
+ # TODO: consider creating one serializer per field here.
+ # The resulting JSON will be the same, but maybe it's easier/cleaner?
"domains": {
"dashboard": settings.PRODUCTION_DOMAIN,
},
@@ -62,8 +183,13 @@ def get(self, request):
"code": settings.GLOBAL_ANALYTICS_CODE,
},
},
- "features": {
+ # TODO: the ``features`` is not polished and we expect to change drastically.
+ # Mainly, all the fields including a Project, Version or Build will use the exact same
+ # serializer than the keys ``project``, ``version`` and ``build`` from the top level.
+ "addons": {
"analytics": {
+ "enabled": True,
+ # TODO: consider adding this field into the ProjectSerializer itself.
"code": project.analytics_code,
},
"external_version_warning": {
@@ -86,7 +212,7 @@ def get(self, request):
project=project,
version_slug=project.get_default_version(),
language=project.language,
- filename=unresolved_url.filename,
+ filename=filename,
),
"root_selector": "[role=main]",
"inject_styles": True,
@@ -116,6 +242,7 @@ def get(self, request):
},
},
"search": {
+ "enabled": True,
"project": project.slug,
"version": version.slug,
"api_endpoint": "/_/api/v3/search/",
@@ -139,4 +266,9 @@ def get(self, request):
if version.build_data:
data.update(version.build_data)
- return JsonResponse(data, json_dumps_params=dict(indent=4))
+ return data
+
+ def _v1(self, project, version, build, filename):
+ return {
+ "comment": "Undefined yet. Use v0 for now",
+ }
| diff --git a/readthedocs/proxito/tests/responses/v0.json b/readthedocs/proxito/tests/responses/v0.json
new file mode 100644
--- /dev/null
+++ b/readthedocs/proxito/tests/responses/v0.json
@@ -0,0 +1,120 @@
+{
+ "comment": "THIS RESPONSE IS IN ALPHA FOR TEST PURPOSES ONLY AND IT'S GOING TO CHANGE COMPLETELY -- DO NOT USE IT!",
+ "projects": {
+ "current": {
+ "created": "2019-04-29T10:00:00Z",
+ "default_branch": "master",
+ "default_version": "latest",
+ "id": 1,
+ "language": {
+ "code": "en",
+ "name": "English"
+ },
+ "modified": "2019-04-29T12:00:00Z",
+ "name": "project",
+ "programming_language": {
+ "code": "words",
+ "name": "Only Words"
+ },
+ "homepage": "http://project.com",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/readthedocs/project"
+ },
+ "slug": "project",
+ "subproject_of": null,
+ "tags": ["project", "tag", "test"],
+ "translation_of": null,
+ "users": [
+ {
+ "username": "testuser"
+ }
+ ]
+ }
+ },
+ "versions": {
+ "current": {
+ "active": true,
+ "hidden": false,
+ "built": true,
+ "downloads": {},
+ "id": 1,
+ "identifier": "a1b2c3",
+ "ref": null,
+ "slug": "latest",
+ "type": "tag",
+ "verbose_name": "latest"
+ }
+ },
+ "builds": {
+ "current": {
+ "commit": "a1b2c3",
+ "created": "2019-04-29T10:00:00Z",
+ "duration": 60,
+ "error": "",
+ "finished": "2019-04-29T10:01:00Z",
+ "id": 1,
+ "project": "project",
+ "state": {
+ "code": "finished",
+ "name": "Finished"
+ },
+ "success": true,
+ "version": "latest"
+ }
+ },
+ "domains": {
+ "dashboard": "readthedocs.org"
+ },
+ "readthedocs": {
+ "analytics": {
+ "code": null
+ }
+ },
+ "addons": {
+ "analytics": {
+ "enabled": true,
+ "code": null
+ },
+ "external_version_warning": {
+ "enabled": true,
+ "query_selector": "[role=main]"
+ },
+ "non_latest_version_warning": {
+ "enabled": true,
+ "query_selector": "[role=main]",
+ "versions": ["latest"]
+ },
+ "doc_diff": {
+ "enabled": true,
+ "base_url": "https://project.dev.readthedocs.io/en/latest/index.html",
+ "root_selector": "[role=main]",
+ "inject_styles": true,
+ "base_host": "",
+ "base_page": ""
+ },
+ "flyout": {
+ "translations": [],
+ "versions": [{ "slug": "latest", "url": "/en/latest/" }],
+ "downloads": [],
+ "vcs": {
+ "url": "https://github.com",
+ "username": "readthedocs",
+ "repository": "test-builds",
+ "branch": "a1b2c3",
+ "filepath": "/docs/index.rst"
+ }
+ },
+ "search": {
+ "enabled": true,
+ "api_endpoint": "/_/api/v3/search/",
+ "default_filter": "subprojects:project/latest",
+ "filters": [
+ ["Search only in this project", "project:project/latest"],
+ ["Search subprojects", "subprojects:project/latest"]
+ ],
+ "project": "project",
+ "version": "latest"
+ }
+ }
+}
diff --git a/readthedocs/proxito/tests/responses/v1.json b/readthedocs/proxito/tests/responses/v1.json
new file mode 100644
--- /dev/null
+++ b/readthedocs/proxito/tests/responses/v1.json
@@ -0,0 +1,3 @@
+{
+ "comment": "Undefined yet. Use v0 for now"
+}
diff --git a/readthedocs/proxito/tests/responses/v2.json b/readthedocs/proxito/tests/responses/v2.json
new file mode 100644
--- /dev/null
+++ b/readthedocs/proxito/tests/responses/v2.json
@@ -0,0 +1,3 @@
+{
+ "error": "The version specified in 'X-RTD-Hosting-Integrations-Version' is currently not supported"
+}
diff --git a/readthedocs/proxito/tests/test_hosting.py b/readthedocs/proxito/tests/test_hosting.py
--- a/readthedocs/proxito/tests/test_hosting.py
+++ b/readthedocs/proxito/tests/test_hosting.py
@@ -1,13 +1,15 @@
"""Test hosting views."""
+import json
+from pathlib import Path
+
import django_dynamic_fixture as fixture
import pytest
-from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from django.urls import reverse
-from readthedocs.builds.constants import EXTERNAL, INTERNAL, LATEST
+from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Build
from readthedocs.projects.constants import PUBLIC
from readthedocs.projects.models import Project
@@ -21,114 +23,89 @@
@pytest.mark.proxito
class TestReadTheDocsConfigJson(TestCase):
def setUp(self):
- self.user = fixture.get(User, username="user")
- self.user.set_password("user")
+ self.user = fixture.get(User, username="testuser")
+ self.user.set_password("testuser")
self.user.save()
self.project = fixture.get(
Project,
slug="project",
+ name="project",
language="en",
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
- repo="git://10.10.0.1/project",
+ repo="https://github.com/readthedocs/project",
programming_language="words",
single_version=False,
users=[self.user],
main_language_project=None,
+ project_url="http://project.com",
)
+
+ for tag in ("tag", "project", "test"):
+ self.project.tags.add(tag)
+
self.project.versions.update(
privacy_level=PUBLIC,
built=True,
active=True,
- type=INTERNAL,
- identifier="1a2b3c",
+ type="tag",
+ identifier="a1b2c3",
)
self.version = self.project.versions.get(slug=LATEST)
self.build = fixture.get(
Build,
+ project=self.project,
version=self.version,
+ commit="a1b2c3",
+ length=60,
+ state="finished",
+ success=True,
+ )
+
+ def _get_response_dict(self, view_name, filepath=None):
+ filepath = filepath or __file__
+ filename = Path(filepath).absolute().parent / "responses" / f"{view_name}.json"
+ return json.load(open(filename))
+
+ def _normalize_datetime_fields(self, obj):
+ obj["projects"]["current"]["created"] = "2019-04-29T10:00:00Z"
+ obj["projects"]["current"]["modified"] = "2019-04-29T12:00:00Z"
+ obj["builds"]["current"]["created"] = "2019-04-29T10:00:00Z"
+ obj["builds"]["current"]["finished"] = "2019-04-29T10:01:00Z"
+ return obj
+
+ def test_get_config_v0(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_config_json"),
+ {"url": "https://project.dev.readthedocs.io/en/latest/"},
+ secure=True,
+ HTTP_HOST="project.dev.readthedocs.io",
+ HTTP_X_RTD_HOSTING_INTEGRATIONS_VERSION="0.1.0",
+ )
+ assert r.status_code == 200
+ assert self._normalize_datetime_fields(r.json()) == self._get_response_dict(
+ "v0"
)
- def test_get_config(self):
+ def test_get_config_v1(self):
r = self.client.get(
reverse("proxito_readthedocs_config_json"),
{"url": "https://project.dev.readthedocs.io/en/latest/"},
secure=True,
HTTP_HOST="project.dev.readthedocs.io",
+ HTTP_X_RTD_HOSTING_INTEGRATIONS_VERSION="1.0.0",
)
assert r.status_code == 200
+ assert r.json() == self._get_response_dict("v1")
- expected = {
- "comment": "THIS RESPONSE IS IN ALPHA FOR TEST PURPOSES ONLY AND IT'S GOING TO CHANGE COMPLETELY -- DO NOT USE IT!",
- "project": {
- "slug": self.project.slug,
- "language": self.project.language,
- "repository_url": self.project.repo,
- "programming_language": self.project.programming_language,
- },
- "version": {
- "slug": self.version.slug,
- "external": self.version.type == EXTERNAL,
- },
- "build": {
- "id": self.build.pk,
- },
- "domains": {
- "dashboard": settings.PRODUCTION_DOMAIN,
- },
- "readthedocs": {
- "analytics": {
- "code": None,
- }
- },
- "features": {
- "analytics": {
- "code": None,
- },
- "external_version_warning": {
- "enabled": True,
- "query_selector": "[role=main]",
- },
- "non_latest_version_warning": {
- "enabled": True,
- "query_selector": "[role=main]",
- "versions": [
- "latest",
- ],
- },
- "doc_diff": {
- "enabled": True,
- "base_url": "https://project.dev.readthedocs.io/en/latest/index.html",
- "root_selector": "[role=main]",
- "inject_styles": True,
- "base_host": "",
- "base_page": "",
- },
- "flyout": {
- "translations": [],
- "versions": [
- {"slug": "latest", "url": "/en/latest/"},
- ],
- "downloads": [],
- "vcs": {
- "url": "https://github.com",
- "username": "readthedocs",
- "repository": "test-builds",
- "branch": self.version.identifier,
- "filepath": "/docs/index.rst",
- },
- },
- "search": {
- "api_endpoint": "/_/api/v3/search/",
- "default_filter": "subprojects:project/latest",
- "filters": [
- ["Search only in this project", "project:project/latest"],
- ["Search subprojects", "subprojects:project/latest"],
- ],
- "project": "project",
- "version": "latest",
- },
- },
- }
- assert r.json() == expected
+ def test_get_config_unsupported_version(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_config_json"),
+ {"url": "https://project.dev.readthedocs.io/en/latest/"},
+ secure=True,
+ HTTP_HOST="project.dev.readthedocs.io",
+ HTTP_X_RTD_HOSTING_INTEGRATIONS_VERSION="2.0.0",
+ )
+ assert r.status_code == 400
+ assert r.json() == self._get_response_dict("v2")
| API: review `/_/readthedocs-config/` endpoint
Take a look at the structure of this response and think about how we want to adapt it to support the initial phase of the js client. See https://github.com/readthedocs/readthedocs-client/issues/26
https://github.com/readthedocs/readthedocs.org/blob/f141a8f931ffe78d6aad90a4710b7ec566994dcd/readthedocs/proxito/views/hosting.py#L35-L136
Besides, consider making the initial changes that will allow us to use our current APIv3 serializers so we can standardize the way we communicate our data across all our endpoints.
The idea is to use "the same structure that would be returned by the APIv3 serializers, but without using them for now" due to https://github.com/readthedocs/readthedocs-ops/issues/1323
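For context, a rough sketch of how a client could exercise the endpoint as it stands in the patch above; the domain is a placeholder, the path follows the issue title, and the header and `url` parameter come from the tests:
```python
import requests

response = requests.get(
    "https://project.readthedocs.io/_/readthedocs-config/",  # placeholder docs domain
    params={"url": "https://project.readthedocs.io/en/latest/"},
    headers={"X-RTD-Hosting-Integrations-Version": "0.1.0"},
    timeout=10,
)
response.raise_for_status()
data = response.json()
print(data["addons"]["flyout"]["versions"])
```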
| 2023-04-03T18:02:13 |
|
readthedocs/readthedocs.org | 10,217 | readthedocs__readthedocs.org-10217 | [
"10466"
] | 9cfb649bf319ac904c8eb3974d880015ac7f094d | diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py
--- a/readthedocs/settings/base.py
+++ b/readthedocs/settings/base.py
@@ -622,32 +622,35 @@ def TEMPLATES(self):
'python': {
'2.7': '2.7.18',
'3.6': '3.6.15',
- '3.7': '3.7.15',
- '3.8': '3.8.15',
- '3.9': '3.9.15',
- '3.10': '3.10.8',
- '3.11': '3.11.0',
+ '3.7': '3.7.17',
+ '3.8': '3.8.17',
+ '3.9': '3.9.17',
+ '3.10': '3.10.12',
+ '3.11': '3.11.4',
'pypy3.7': 'pypy3.7-7.3.9',
- 'pypy3.8': 'pypy3.8-7.3.9',
- 'pypy3.9': 'pypy3.9-7.3.9',
+ 'pypy3.8': 'pypy3.8-7.3.11',
+ 'pypy3.9': 'pypy3.9-7.3.12',
'miniconda3-4.7': 'miniconda3-4.7.12',
'mambaforge-4.10': 'mambaforge-4.10.3-10',
},
'nodejs': {
'14': '14.20.1',
- '16': '16.18.0',
- '18': '18.11.0',
- '19': '19.0.0',
+ '16': '16.18.1',
+ '18': '18.16.1', # LTS
+ '19': '19.0.1',
+ '20': '20.3.1',
},
'rust': {
'1.55': '1.55.0',
'1.61': '1.61.0',
'1.64': '1.64.0',
+ '1.70': '1.70.0',
},
'golang': {
'1.17': '1.17.13',
- '1.18': '1.18.7',
- '1.19': '1.19.2',
+ '1.18': '1.18.10',
+ '1.19': '1.19.10',
+ '1.20': '1.20.5',
},
},
}
| Upgrade rust build environment to 1.70
## What's the problem this feature will solve?
The most recent currently available version of Rust on RTD is 1.64, while Rust itself is at 1.70. In most cases 1.64 is fine, but there is an increasing number of crates requiring newer features of the language, so repositories using those cannot be built on RTD.
The case where I ran into this was when building https://github.com/nmandery/h3ronpy :
The build log output was
```
Building wheels for collected packages: h3ronpy
Building wheel for h3ronpy (pyproject.toml): started
Building wheel for h3ronpy (pyproject.toml): finished with status 'error'
error: subprocess-exited-with-error
× Building wheel for h3ronpy (pyproject.toml) did not run successfully.
│ exit code: 1
╰─> [10 lines of output]
Running `maturin pep517 build-wheel -i /home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/envs/latest/bin/python --compatibility off`
📦 Including license file "/home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/checkouts/latest/LICENSE.txt"
🍹 Building a mixed python/rust project
🔗 Found pyo3 bindings with abi3 support for Python ≥ 3.8
🐍 Not using a specific python interpreter
error: package `konst v0.3.5` cannot be built because it requires rustc 1.65.0 or newer, while the currently active rustc version is 1.64.0
💥 maturin failed
Caused by: Failed to build a native library through cargo
Caused by: Cargo build finished with "exit status: 101": `PYO3_ENVIRONMENT_SIGNATURE="cpython-3.9-64bit" PYO3_PYTHON="/home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/envs/latest/bin/python" PYTHON_SYS_EXECUTABLE="/home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/envs/latest/bin/python" "cargo" "rustc" "--message-format" "json-render-diagnostics" "--manifest-path" "/home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/checkouts/latest/Cargo.toml" "--release" "--lib"`
Error: command ['maturin', 'pep517', 'build-wheel', '-i', '/home/docs/checkouts/readthedocs.org/user_builds/h3ronpy/envs/latest/bin/python', '--compatibility', 'off'] returned non-zero exit status 1
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
ERROR: Failed building wheel for h3ronpy
Failed to build h3ronpy
```
## Describe the solution you'd like
Having a more recent rust version available would be great. 1.65 would seemingly be sufficient for this issue, but upgrading directly to 1.70 would certainly be more sustainable.
I saw #9692 and while I am not too familiar with how RTD works, I could try to submit a PR following what's done there.
## Alternative solutions
I am not aware of any good workarounds.
## Additional context
-
| 2023-04-03T18:30:43 |
||
readthedocs/readthedocs.org | 10,334 | readthedocs__readthedocs.org-10334 | [
"10208"
] | 37f9806a0f96f281a2dcd0ed73b930781fa7938a | diff --git a/readthedocs/doc_builder/environments.py b/readthedocs/doc_builder/environments.py
--- a/readthedocs/doc_builder/environments.py
+++ b/readthedocs/doc_builder/environments.py
@@ -376,10 +376,13 @@ def get_wrapped_command(self):
for part in self.command
)
)
- return "/bin/sh -c '{prefix}{cmd}'".format(
- prefix=prefix,
- cmd=command,
- )
+ if prefix:
+ # Using `;` or `\n` to separate the `prefix` where we define the
+ # variables with the `command` itself, have the same effect.
+ # However, using `;` is more explicit.
+ # See https://github.com/readthedocs/readthedocs.org/pull/10334
+ return f"/bin/sh -c '{prefix}; {command}'"
+ return f"/bin/sh -c '{command}'"
def _escape_command(self, cmd):
r"""Escape the command by prefixing suspicious chars with `\`."""
| Build: dump user's command into a shell script before running it
We attempted to fix this issue multiple times and we found problems when deploying those changes to production.
This is not a perfect solution, but it is a simple one that could help us support multi-line commands for now without manipulating the `PATH` variable "too much", which was what was causing us issues.
Let me know what you think about this approach and how to move forward if you prefer a different one.
Closes #10103
Reference #10206
Reference #10172
Reference #10133
I'm not sure if it makes sense to move forward with this PR or not due to the doubts and concerns about this implementation. However, I ran out of ideas trying to solve this problem. If there are better ideas, I'm happy to implement them.
How about just passing the multiline script as a parameter to `sh -c` instead of writing it out to a file? eg.
```bash
sh -c 'export PATH={bin_path}$PATH
{command}'
```
Seems like it would require fewer changes.
Actually, thinking about it: just changing the relevant part of the code from `command = "PATH=foo {command}"` to `command = "export PATH=foo; {command}"` should do the trick, no multiline trickery needed. | 2023-05-22T11:52:23 |
|
readthedocs/readthedocs.org | 10,354 | readthedocs__readthedocs.org-10354 | [
"10348"
] | b255f7807fc7e01be785825d17f760433f5f7a19 | diff --git a/readthedocs/builds/models.py b/readthedocs/builds/models.py
--- a/readthedocs/builds/models.py
+++ b/readthedocs/builds/models.py
@@ -1069,10 +1069,20 @@ def can_rebuild(self):
def external_version_name(self):
return external_version_name(self)
- def using_latest_config(self):
- if self.config:
- return int(self.config.get('version', '1')) == LATEST_CONFIGURATION_VERSION
- return False
+ def deprecated_config_used(self):
+ """
+ Check whether this particular build is using a deprecated config file.
+
+ When using v1 or not having a config file at all, it returns ``True``.
+ Returns ``False`` only when it has a config file and it is using v2.
+
+ Note we are using this to communicate deprecation of v1 file and not using a config file.
+ See https://github.com/readthedocs/readthedocs.org/issues/10342
+ """
+ if not self.config:
+ return True
+
+ return int(self.config.get("version", "1")) != LATEST_CONFIGURATION_VERSION
def reset(self):
"""
diff --git a/readthedocs/projects/tasks/utils.py b/readthedocs/projects/tasks/utils.py
--- a/readthedocs/projects/tasks/utils.py
+++ b/readthedocs/projects/tasks/utils.py
@@ -3,9 +3,13 @@
import structlog
from celery.worker.request import Request
-from django.db.models import Q
+from django.conf import settings
+from django.contrib.auth.models import User
+from django.db.models import Q, Sum
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
+from djstripe.enums import SubscriptionStatus
+from messages_extends.constants import WARNING_PERSISTENT
from readthedocs.builds.constants import (
BUILD_FINAL_STATES,
@@ -14,7 +18,12 @@
)
from readthedocs.builds.models import Build
from readthedocs.builds.tasks import send_build_status
+from readthedocs.core.permissions import AdminPermission
from readthedocs.core.utils.filesystem import safe_rmtree
+from readthedocs.notifications import Notification, SiteNotification
+from readthedocs.notifications.backends import EmailBackend
+from readthedocs.notifications.constants import REQUIREMENT
+from readthedocs.projects.models import Project
from readthedocs.storage import build_media_storage
from readthedocs.worker import app
@@ -154,6 +163,158 @@ def send_external_build_status(version_type, build_pk, commit, status):
send_build_status.delay(build_pk, commit, status)
+class DeprecatedConfigFileSiteNotification(SiteNotification):
+
+ # TODO: mention all the project slugs here
+ # Maybe trim them to up to 5 projects to avoid sending a huge blob of text
+ failure_message = _(
+ 'Your project(s) "{{ project_slugs }}" don\'t have a configuration file. '
+ "Configuration files will <strong>soon be required</strong> by projects, "
+ "and will no longer be optional. "
+ '<a href="https://blog.readthedocs.com/migrate-configuration-v2/">Read our blog post to create one</a> ' # noqa
+ "and ensure your project continues building successfully."
+ )
+ failure_level = WARNING_PERSISTENT
+
+
+class DeprecatedConfigFileEmailNotification(Notification):
+
+ app_templates = "projects"
+ name = "deprecated_config_file_used"
+ context_object_name = "project"
+ subject = "[Action required] Add a configuration file to your project to prevent build failure"
+ level = REQUIREMENT
+
+ def send(self):
+ """Method overwritten to remove on-site backend."""
+ backend = EmailBackend(self.request)
+ backend.send(self)
+
+
+@app.task(queue="web")
+def deprecated_config_file_used_notification():
+ """
+ Create a notification about not using a config file for all the maintainers of the project.
+
+ This is a scheduled task to be executed on the webs.
+ Note the code uses `.iterator` and `.only` to avoid killing the db with this query.
+ Besides, it excludes projects with enough spam score to be skipped.
+ """
+ # Skip projects with a spam score bigger than this value.
+ # Currently, this gives us ~250k in total (from ~550k we have in our database)
+ spam_score = 300
+
+ projects = set()
+ start_datetime = datetime.datetime.now()
+ queryset = Project.objects.exclude(users__profile__banned=True)
+ if settings.ALLOW_PRIVATE_REPOS:
+ # Only send emails to active customers
+ queryset = queryset.filter(
+ organizations__stripe_subscription__status=SubscriptionStatus.active
+ )
+ else:
+ # Take into account spam score on community
+ queryset = queryset.annotate(spam_score=Sum("spam_rules__value")).filter(
+ Q(spam_score__lt=spam_score) | Q(is_spam=False)
+ )
+ queryset = queryset.only("slug", "default_version").order_by("id")
+ n_projects = queryset.count()
+
+ for i, project in enumerate(queryset.iterator()):
+ if i % 500 == 0:
+ log.info(
+ "Finding projects without a configuration file.",
+ progress=f"{i}/{n_projects}",
+ current_project_pk=project.pk,
+ current_project_slug=project.slug,
+ projects_found=len(projects),
+ time_elapsed=(datetime.datetime.now() - start_datetime).seconds,
+ )
+
+ # Only check for the default version because if the project is using tags
+ # they won't be able to update those and we will send them emails forever.
+ # We can update this query if we consider later.
+ version = (
+ project.versions.filter(slug=project.default_version).only("id").first()
+ )
+ if version:
+ build = (
+ version.builds.filter(success=True)
+ .only("_config")
+ .order_by("-date")
+ .first()
+ )
+ if build and build.deprecated_config_used():
+ projects.add(project.slug)
+
+ # Store all the users we want to contact
+ users = set()
+
+ n_projects = len(projects)
+ queryset = Project.objects.filter(slug__in=projects).order_by("id")
+ for i, project in enumerate(queryset.iterator()):
+ if i % 500 == 0:
+ log.info(
+ "Querying all the users we want to contact.",
+ progress=f"{i}/{n_projects}",
+ current_project_pk=project.pk,
+ current_project_slug=project.slug,
+ users_found=len(users),
+ time_elapsed=(datetime.datetime.now() - start_datetime).seconds,
+ )
+
+ users.update(AdminPermission.owners(project).values_list("username", flat=True))
+
+ # Only send 1 email per user,
+ # even if that user has multiple projects without a configuration file.
+ # The notification will mention all the projects.
+ queryset = User.objects.filter(username__in=users, profile__banned=False).order_by(
+ "id"
+ )
+ n_users = queryset.count()
+ for i, user in enumerate(queryset.iterator()):
+ if i % 500 == 0:
+ log.info(
+ "Sending deprecated config file notification to users.",
+ progress=f"{i}/{n_users}",
+ current_user_pk=user.pk,
+ current_user_username=user.username,
+ time_elapsed=(datetime.datetime.now() - start_datetime).seconds,
+ )
+
+ # All the projects for this user that don't have a configuration file
+ user_projects = (
+ AdminPermission.projects(user, admin=True)
+ .filter(slug__in=projects)
+ .only("slug")
+ )
+
+ user_project_slugs = ", ".join([p.slug for p in user_projects[:5]])
+ if user_projects.count() > 5:
+ user_project_slugs += " and others..."
+
+ n_site = DeprecatedConfigFileSiteNotification(
+ user=user,
+ context_object=user_projects,
+ extra_context={"project_slugs": user_project_slugs},
+ success=False,
+ )
+ n_site.send()
+
+ # TODO: uncomment this code when we are ready to send email notifications
+ # n_email = DeprecatedConfigFileEmailNotification(
+ # user=user,
+ # context_object=user_projects,
+ # extra_context={"project_slugs": user_project_slugs},
+ # )
+ # n_email.send()
+
+ log.info(
+ "Finish sending deprecated config file notifications.",
+ time_elapsed=(datetime.datetime.now() - start_datetime).seconds,
+ )
+
+
class BuildRequest(Request):
def on_timeout(self, soft, timeout):
diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py
--- a/readthedocs/settings/base.py
+++ b/readthedocs/settings/base.py
@@ -532,6 +532,11 @@ def TEMPLATES(self):
'schedule': crontab(minute='*/15'),
'options': {'queue': 'web'},
},
+ 'weekly-config-file-notification': {
+ 'task': 'readthedocs.projects.tasks.utils.deprecated_config_file_used_notification',
+ 'schedule': crontab(day_of_week='wednesday', hour=11, minute=15),
+ 'options': {'queue': 'web'},
+ },
}
MULTIPLE_BUILD_SERVERS = [CELERY_DEFAULT_QUEUE]
| diff --git a/readthedocs/rtd_tests/tests/test_builds.py b/readthedocs/rtd_tests/tests/test_builds.py
--- a/readthedocs/rtd_tests/tests/test_builds.py
+++ b/readthedocs/rtd_tests/tests/test_builds.py
@@ -249,7 +249,7 @@ def test_build_is_stale(self):
self.assertTrue(build_two.is_stale)
self.assertFalse(build_three.is_stale)
- def test_using_latest_config(self):
+ def test_deprecated_config_used(self):
now = timezone.now()
build = get(
@@ -260,12 +260,12 @@ def test_using_latest_config(self):
state='finished',
)
- self.assertFalse(build.using_latest_config())
+ self.assertTrue(build.deprecated_config_used())
build.config = {'version': 2}
build.save()
- self.assertTrue(build.using_latest_config())
+ self.assertFalse(build.deprecated_config_used())
def test_build_is_external(self):
# Turn the build version to EXTERNAL type.
| Deprecation: add warning for required configuration
The first step that we talked about in #10342 was starting to warn projects of the upcoming changes. We definitely want to add this to the build detail page; we could also emit this as a sticky site notification on all pages. This is a more immediate change to start this process, so whatever is easiest for now.
Next up, and slightly related to this, we will need to do the marketing-level sort of steps. I am separating these out as they will come a little later than the immediate steps here.
- https://github.com/readthedocs/readthedocs.org/issues/10351
| 2023-05-25T11:55:34 |
|
readthedocs/readthedocs.org | 10,389 | readthedocs__readthedocs.org-10389 | [
"10372"
] | bdb78a0d1af64fe34be4cef347dbedacd3e3d2c6 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -102,6 +102,7 @@
"rst-to-myst": ("https://rst-to-myst.readthedocs.io/en/stable/", None),
"rtd": ("https://docs.readthedocs.io/en/stable/", None),
"rtd-dev": ("https://dev.readthedocs.io/en/latest/", None),
+ "rtd-blog": ("https://blog.readthedocs.com/", None),
"jupyter": ("https://docs.jupyter.org/en/latest/", None),
}
| Docs: Config file deprecation warnings
* [x] Upgrade v1 docs to clearly indicate that it's removed in September 2023 and link to the v2 version
* [x] Add warning banners in various places
References https://github.com/readthedocs/readthedocs.org/issues/10342
| 2023-06-05T21:39:23 |
||
readthedocs/readthedocs.org | 10,427 | readthedocs__readthedocs.org-10427 | [
"10429"
] | 418980d4de2196464941f21bf2aaa5700f135bf6 | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -3,7 +3,7 @@
import packaging
import structlog
from django.conf import settings
-from django.http import JsonResponse
+from django.http import Http404, JsonResponse
from django.views import View
from readthedocs.api.v3.serializers import (
@@ -11,9 +11,10 @@
ProjectSerializer,
VersionSerializer,
)
+from readthedocs.builds.models import Version
from readthedocs.core.mixins import CDNCacheControlMixin
from readthedocs.core.resolver import resolver
-from readthedocs.core.unresolver import unresolver
+from readthedocs.core.unresolver import UnresolverError, unresolver
log = structlog.get_logger(__name__) # noqa
@@ -83,15 +84,28 @@ def get(self, request):
unresolved_domain = request.unresolved_domain
project = unresolved_domain.project
- unresolved_url = unresolver.unresolve_url(url)
- version = unresolved_url.version
- filename = unresolved_url.filename
-
- project.get_default_version()
- build = version.builds.last()
+ try:
+ unresolved_url = unresolver.unresolve_url(url)
+ version = unresolved_url.version
+ filename = unresolved_url.filename
+ build = version.builds.last()
+
+ except UnresolverError as exc:
+ # If an exception is raised and there is a ``project`` in the
+ # exception, it's a partial match. This could be because of an
+ # invalid URL path, but on a valid project domain. In this case, we
+ # continue with the ``project``, but without a ``version``.
+ # Otherwise, we return 404 NOT FOUND.
+ project = getattr(exc, "project", None)
+ if not project:
+ raise Http404() from exc
+
+ version = None
+ filename = None
+ build = None
data = AddonsResponse().get(addons_version, project, version, build, filename)
- return JsonResponse(data, json_dumps_params=dict(indent=4))
+ return JsonResponse(data, json_dumps_params={"indent": 4, "sort_keys": True})
class NoLinksMixin:
@@ -104,7 +118,7 @@ class NoLinksMixin:
)
def __init__(self, *args, **kwargs):
- super(NoLinksMixin, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
for field in self.FIELDS_TO_REMOVE:
if field in self.fields:
@@ -115,7 +129,7 @@ def __init__(self, *args, **kwargs):
# NOTE: the following serializers are required only to remove some fields we
-# can't expose yet in this API endpoint because it running under El Proxito
+# can't expose yet in this API endpoint because it's running under El Proxito
# which cannot resolve some dashboard URLs because they are not defined on El
# Proxito.
#
@@ -133,7 +147,7 @@ class BuildSerializerNoLinks(NoLinksMixin, BuildSerializer):
class AddonsResponse:
- def get(self, addons_version, project, version, build, filename):
+ def get(self, addons_version, project, version=None, build=None, filename=None):
"""
Unique entry point to get the proper API response.
@@ -158,6 +172,11 @@ def _v0(self, project, version, build, filename):
It tries to follow some similarity with the APIv3 for already-known resources
(Project, Version, Build, etc).
"""
+ versions_active_built = (
+ Version.internal.public(project=project, only_active=True, only_built=True)
+ .only("slug")
+ .order_by("slug")
+ )
data = {
"comment": (
@@ -168,10 +187,10 @@ def _v0(self, project, version, build, filename):
"current": ProjectSerializerNoLinks(project).data,
},
"versions": {
- "current": VersionSerializerNoLinks(version).data,
+ "current": VersionSerializerNoLinks(version).data if version else None,
},
"builds": {
- "current": BuildSerializerNoLinks(build).data,
+ "current": BuildSerializerNoLinks(build).data if build else None,
},
# TODO: consider creating one serializer per field here.
# The resulting JSON will be the same, but maybe it's easier/cleaner?
@@ -190,19 +209,24 @@ def _v0(self, project, version, build, filename):
"analytics": {
"enabled": True,
# TODO: consider adding this field into the ProjectSerializer itself.
+ # NOTE: it seems we are removing this feature,
+ # so we may not need the ``code`` attribute here
+ # https://github.com/readthedocs/readthedocs.org/issues/9530
"code": project.analytics_code,
},
"external_version_warning": {
"enabled": True,
- "query_selector": "[role=main]",
+ # NOTE: I think we are moving away from these selectors
+ # since we are doing floating noticications now.
+ # "query_selector": "[role=main]",
},
"non_latest_version_warning": {
"enabled": True,
- "query_selector": "[role=main]",
+ # NOTE: I think we are moving away from these selectors
+ # since we are doing floating noticications now.
+ # "query_selector": "[role=main]",
"versions": list(
- project.versions.filter(active=True)
- .only("slug")
- .values_list("slug", flat=True)
+ versions_active_built.values_list("slug", flat=True)
),
},
"doc_diff": {
@@ -213,7 +237,9 @@ def _v0(self, project, version, build, filename):
version_slug=project.get_default_version(),
language=project.language,
filename=filename,
- ),
+ )
+ if filename
+ else None,
"root_selector": "[role=main]",
"inject_styles": True,
# NOTE: `base_host` and `base_page` are not required, since
@@ -229,7 +255,7 @@ def _v0(self, project, version, build, filename):
"slug": version.slug,
"url": f"/{project.language}/{version.slug}/",
}
- for version in project.versions.filter(active=True).only("slug")
+ for version in versions_active_built
],
"downloads": [],
# TODO: get this values properly
@@ -237,14 +263,14 @@ def _v0(self, project, version, build, filename):
"url": "https://github.com",
"username": "readthedocs",
"repository": "test-builds",
- "branch": version.identifier,
+ "branch": version.identifier if version else None,
"filepath": "/docs/index.rst",
},
},
"search": {
"enabled": True,
"project": project.slug,
- "version": version.slug,
+ "version": version.slug if version else None,
"api_endpoint": "/_/api/v3/search/",
# TODO: figure it out where this data comes from
"filters": [
@@ -256,14 +282,18 @@ def _v0(self, project, version, build, filename):
"Search subprojects",
f"subprojects:{project.slug}/{version.slug}",
],
- ],
- "default_filter": f"subprojects:{project.slug}/{version.slug}",
+ ]
+ if version
+ else [],
+ "default_filter": f"subprojects:{project.slug}/{version.slug}"
+ if version
+ else None,
},
},
}
# Update this data with the one generated at build time by the doctool
- if version.build_data:
+ if version and version.build_data:
data.update(version.build_data)
return data
| diff --git a/readthedocs/proxito/tests/responses/v0.json b/readthedocs/proxito/tests/responses/v0.json
--- a/readthedocs/proxito/tests/responses/v0.json
+++ b/readthedocs/proxito/tests/responses/v0.json
@@ -80,12 +80,10 @@
"code": null
},
"external_version_warning": {
- "enabled": true,
- "query_selector": "[role=main]"
+ "enabled": true
},
"non_latest_version_warning": {
"enabled": true,
- "query_selector": "[role=main]",
"versions": ["latest"]
},
"doc_diff": {
| Addons: API endpoint is too slow
It seems we have:
1. ~200ms on the Python view
2. ~200ms to grab and retrieve all the active Versions
We can optimize 1) by handling all the unresolver exceptions (see https://github.com/readthedocs/readthedocs.org/issues/10399) and 2) by copying the DB queries the FooterAPI is doing, since they are a lot more optimized (https://github.com/readthedocs/readthedocs.org/blob/6434c6505a090c46e6b678e14a31a191f8621a5c/readthedocs/api/v2/views/footer_views.py#L136-L143)
New Relic shows an average of ~400ms, but a median of ~70ms: https://onenr.io/0BQ1Mp1o9Qx
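As a rough sketch (not the final implementation), the narrowed query that 2) implies would look something like the footer API's lookup — it assumes the `Version.internal.public` manager and fetches only the `slug` column:

```python
# Sketch only: assumes Django is set up and the ``Version`` model with its
# ``internal.public`` manager (the one the footer API uses) is importable.
from readthedocs.builds.models import Version


def active_built_version_slugs(project):
    # Ask the database only for the slugs of active, already-built versions,
    # instead of loading full Version objects for the addons response.
    queryset = (
        Version.internal.public(project=project, only_active=True, only_built=True)
        .only("slug")
        .order_by("slug")
    )
    return list(queryset.values_list("slug", flat=True))
```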
| 2023-06-13T11:36:41 |
|
readthedocs/readthedocs.org | 10,461 | readthedocs__readthedocs.org-10461 | [
"10458"
] | ee52a2f123a06eb6d4df276daadcf96885661c7a | diff --git a/readthedocs/proxito/middleware.py b/readthedocs/proxito/middleware.py
--- a/readthedocs/proxito/middleware.py
+++ b/readthedocs/proxito/middleware.py
@@ -16,6 +16,7 @@
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
+from readthedocs.builds.models import Version
from readthedocs.core.unresolver import (
InvalidCustomDomainError,
InvalidExternalDomainError,
@@ -25,7 +26,7 @@
unresolver,
)
from readthedocs.core.utils import get_cache_tag
-from readthedocs.projects.models import Feature, Project
+from readthedocs.projects.models import Feature
from readthedocs.proxito.cache import add_cache_tags, cache_response, private_response
from readthedocs.proxito.redirects import redirect_to_https
@@ -293,20 +294,12 @@ def add_hosting_integrations_headers(self, request, response):
project_slug = getattr(request, "path_project_slug", "")
version_slug = getattr(request, "path_version_slug", "")
- if project_slug:
- project = Project.objects.get(slug=project_slug)
-
- # Check for the feature flag
- if project.has_feature(Feature.HOSTING_INTEGRATIONS):
- addons = True
- else:
- # Check if the version forces injecting the addons (e.g. using `build.commands`)
- version = (
- project.versions.filter(slug=version_slug).only("addons").first()
- )
- if version and version.addons:
- addons = True
-
+ if project_slug and version_slug:
+ addons = Version.objects.filter(
+ project__slug=project_slug,
+ slug=version_slug,
+ addons=True,
+ ).exists()
if addons:
response["X-RTD-Hosting-Integrations"] = "true"
| Proxito: improve query to check for addons
Small improvement until we work on https://github.com/readthedocs/readthedocs.org/issues/10456
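For reference, the single `EXISTS` lookup this boils down to — a sketch that assumes the `Version` model with its `addons` boolean flag:

```python
# Sketch only: one query instead of fetching the Project and then its Version.
from readthedocs.builds.models import Version


def has_addons_enabled(project_slug, version_slug):
    # True when this exact project/version pair has addons force-enabled.
    return Version.objects.filter(
        project__slug=project_slug,
        slug=version_slug,
        addons=True,
    ).exists()
```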
| 2023-06-22T09:04:24 |
||
readthedocs/readthedocs.org | 10,515 | readthedocs__readthedocs.org-10515 | [
"10402"
] | 84f889a0e1a8a352d84a188749266fc262298471 | diff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py
--- a/readthedocs/doc_builder/python_environments.py
+++ b/readthedocs/doc_builder/python_environments.py
@@ -209,12 +209,8 @@ def install_core_requirements(self):
self.project.get_feature_value(
Feature.DEFAULT_TO_MKDOCS_0_17_3,
positive='mkdocs==0.17.3',
- negative=self.project.get_feature_value(
- Feature.USE_MKDOCS_LATEST,
- positive='mkdocs<1.1',
- negative='mkdocs',
- ),
- ),
+ negative="mkdocs",
+ )
)
else:
requirements.extend(
diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -1927,7 +1927,6 @@ def add_features(sender, **kwargs):
DONT_INSTALL_LATEST_PIP = 'dont_install_latest_pip'
USE_SPHINX_LATEST = 'use_sphinx_latest'
DEFAULT_TO_MKDOCS_0_17_3 = 'default_to_mkdocs_0_17_3'
- USE_MKDOCS_LATEST = 'use_mkdocs_latest'
USE_SPHINX_RTD_EXT_LATEST = 'rtd_sphinx_ext_latest'
# Search related features
@@ -2034,7 +2033,6 @@ def add_features(sender, **kwargs):
DEFAULT_TO_MKDOCS_0_17_3,
_("MkDOcs: Install mkdocs 0.17.3 by default"),
),
- (USE_MKDOCS_LATEST, _("MkDocs: Use latest version of MkDocs")),
(
USE_SPHINX_RTD_EXT_LATEST,
_("Sphinx: Use latest version of the Read the Docs Sphinx extension"),
| Feature flag: MkDocs use latest has its logic inverted
Take a look at this code, https://github.com/readthedocs/readthedocs.org/blob/bcf45cd666b75d63319bc0d0b6fa18fec24967dc/readthedocs/doc_builder/python_environments.py#L212-L216
It seems when `DEFAULT_TO_MKDOCS_0_17_3=False` and `USE_MKDOCS_LATEST=True`, we are installing `mkdocs<1.1` when we should be installing `mkdocs` (latest).
Fixing this logic is easy. However, we should take a look at our data in Metabase to know how many projects this will affect, and decide what to do and how.
Related #9779
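A small standalone sketch of how that nested lookup resolves (using a simplified stand-in for `Project.get_feature_value`, which returns `positive` when the flag is enabled and `negative` otherwise) makes the inversion visible:

```python
# Sketch only: a plain-Python stand-in for the nested feature-flag lookup
# linked above, showing which pin each flag combination ends up with.
def get_feature_value(enabled, positive, negative):
    return positive if enabled else negative


def mkdocs_requirement(default_to_0_17_3, use_mkdocs_latest):
    return get_feature_value(
        default_to_0_17_3,
        positive="mkdocs==0.17.3",
        negative=get_feature_value(
            use_mkdocs_latest,
            positive="mkdocs<1.1",  # the "latest" flag actually pins an old version
            negative="mkdocs",      # no flag is what installs the real latest
        ),
    )


# The combination described above:
assert mkdocs_requirement(False, True) == "mkdocs<1.1"   # not the latest
assert mkdocs_requirement(False, False) == "mkdocs"      # latest
```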
| @ericholscher are we OK just inverting this logic or do we want to do something before making the change? I'd like to start installing the latest MkDocs for projects instead of an old version 😄
@humitos I'm 👍 on unbreaking mkdocs, so let's do it. Alternatively, we just remove the feature flag, if old projects were already upgraded?
> Alternatively, we just remove the feature flag
I'll keep the flag for now, but fixing the logic as the first step. We will be using #10508 sooner than later anyways. | 2023-07-06T10:33:10 |
|
readthedocs/readthedocs.org | 10,550 | readthedocs__readthedocs.org-10550 | [
"1800"
] | 752cfeb64efed81b898d0d7ed5932629cb5d2996 | diff --git a/readthedocs/projects/tasks/builds.py b/readthedocs/projects/tasks/builds.py
--- a/readthedocs/projects/tasks/builds.py
+++ b/readthedocs/projects/tasks/builds.py
@@ -574,24 +574,13 @@ def get_valid_artifact_types(self):
if artifact_type == "html":
index_html_filepath = os.path.join(artifact_directory, "index.html")
- readme_html_filepath = os.path.join(artifact_directory, "README.html")
- if not os.path.exists(index_html_filepath) and not os.path.exists(
- readme_html_filepath
- ):
- log.warning(
+ if not os.path.exists(index_html_filepath):
+ log.info(
"Failing the build. "
"HTML output does not contain an 'index.html' at its root directory.",
index_html=index_html_filepath,
- readme_html=readme_html_filepath,
)
- # TODO: uncomment this line to fail the build once we have
- # communicated with projects without an index.html or
- # README.html
- #
- # NOTE: we want to deprecate serving README.html as an
- # index.html file as well.
- #
- # raise BuildUserError(BuildUserError.BUILD_OUTPUT_HTML_NO_INDEX_FILE)
+ raise BuildUserError(BuildUserError.BUILD_OUTPUT_HTML_NO_INDEX_FILE)
if not os.path.exists(artifact_directory):
# There is no output directory.
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -294,6 +294,14 @@ def test_build_updates_documentation_type(self, load_yaml_config):
)
).touch()
+ # Create an "index.html" at root to avoid failing the builds
+ pathlib.Path(
+ os.path.join(
+ self.project.artifact_path(version=self.version.slug, type_="html"),
+ "index.html",
+ )
+ ).touch()
+
self._trigger_update_docs_task()
# Update version state
@@ -438,6 +446,14 @@ def test_successful_build(
)
).touch()
+ # Create an "index.html" at root to avoid failing the builds
+ pathlib.Path(
+ os.path.join(
+ self.project.artifact_path(version=self.version.slug, type_="html"),
+ "index.html",
+ )
+ ).touch()
+
self._trigger_update_docs_task()
# It has to be called twice, ``before_start`` and ``after_return``
| Warn or Fail build if there is no `index.html` generated
Docs look broken when there isn't an index file to serve. We should probably be validating that an `index.html` or a `README.html` exists, which we will serve at the root of their docs.
| I think this is already implemented since when there is no `index.html` generated RTD generates one for us with a message which also points to the "Getting Started Guide" at #3447
@ericholscher If that's the issue, can we close this one?
Believe this should still be the case. People can still have no `index.[md|rst]` and have a build pass. We're only generating them if there is no sphinx/mkdocs config found.
Related issue #2483
For the record:
- The builds already fails if there is no index file (sphinx or empty project).
- If the user change the `master_doc` (other name than index), the build pass, but the docs at `/` gives 404.
This is also very easy to hit with mkdocs.
I don't think we have a way to indicate a `waning` in the build process. Also, mkdocs already handle the case where users have a README file, so, we can't just check for a missing index file, we could do the check in the generated docs.
Also, now sphinx-quickstart suggest an index file by default, which can help to have fewer users without an index file.
Checking for `index.html` after build instead of at build time (checking for `.md` or `.rst`) is good. I just found an example where the `index.html` comes from an static `.html` file and we were creating an `index.md` that overrides the static file the user wanted. See https://github.com/readthedocs/readthedocs.org/pull/7305
Yea -- we should be able to figure this out via ImportedFile objects, and wouldn't be a hard feature to build I think 👍
FYI, I was just bitten by this in https://github.com/executablebooks/sphinx-external-toc/issues/1#issuecomment-817374457
We did some work to start logging what are the projects where we are creating the `index.html` file automatically at https://github.com/readthedocs/readthedocs.org/pull/10471. After some weeks we will contact them telling we are removing this feature.
I'm assigning this issue to myself so I work on this in the next weeks.
We have some logs already at https://onenr.io/0LwGgpA0Yw6. It seems we have around ~114 projects (.org and .com combined)
We sent the email today 📧 🚀 | 2023-07-19T15:33:57 |
readthedocs/readthedocs.org | 10,551 | readthedocs__readthedocs.org-10551 | [
"10513"
] | b99cd41ed28e12f6774d83a80e50c59e8f80a5e5 | diff --git a/readthedocs/api/v3/filters.py b/readthedocs/api/v3/filters.py
--- a/readthedocs/api/v3/filters.py
+++ b/readthedocs/api/v3/filters.py
@@ -60,15 +60,17 @@ def get_running(self, queryset, name, value):
class RemoteRepositoryFilter(filters.FilterSet):
- name = filters.CharFilter(field_name='name', lookup_expr='icontains')
- organization = filters.CharFilter(field_name='organization__slug')
+ name = filters.CharFilter(field_name="name", lookup_expr="icontains")
+ full_name = filters.CharFilter(field_name="full_name", lookup_expr="icontains")
+ organization = filters.CharFilter(field_name="organization__slug")
class Meta:
model = RemoteRepository
fields = [
- 'name',
- 'vcs_provider',
- 'organization',
+ "name",
+ "full_name",
+ "vcs_provider",
+ "organization",
]
| diff --git a/readthedocs/api/v3/tests/test_remoterepositories.py b/readthedocs/api/v3/tests/test_remoterepositories.py
--- a/readthedocs/api/v3/tests/test_remoterepositories.py
+++ b/readthedocs/api/v3/tests/test_remoterepositories.py
@@ -103,3 +103,19 @@ def test_remote_repository_list_name_filter(self):
response_data,
self._get_response_dict('remoterepositories-list'),
)
+
+ def test_remote_repository_list_full_name_filter(self):
+ self.client.credentials(HTTP_AUTHORIZATION=f"Token {self.token.key}")
+ response = self.client.get(
+ reverse("remoterepositories-list"),
+ {"expand": ("projects," "remote_organization"), "full_name": "proj"},
+ )
+ self.assertEqual(response.status_code, 200)
+
+ response_data = response.json()
+ self.assertEqual(len(response_data["results"]), 1)
+
+ self.assertDictEqual(
+ response_data,
+ self._get_response_dict("remoterepositories-list"),
+ )
| API: allow remote repo full name query
The new dashboard is still using the API v2 remote repo API, which does not allow expanding the project results and doesn't have all of the fields that I'd like to use in the results listing. The API v3 needs the v2 API's implementation of searching by full_name; the current pattern of matching `full_name` with icontains on the v2 API works okay for now.
I didn't want to alter the v2 API further, as we should really be moving towards the v3 API, but if it's just easier to add expansion there for some reason, that is also fine.
Note: this also gives expansion on the nested projects in the result, so we can get fields like the avatar_url, etc. The current v2 search only returns the project slug and a link to the project dashboard.
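A sketch of the filter addition this implies on the v3 filterset (import paths follow the main codebase, so treat them as assumptions):

```python
# Sketch only: a case-insensitive substring filter on ``full_name``,
# alongside the existing ``name`` filter on the v3 RemoteRepository API.
import django_filters

from readthedocs.oauth.models import RemoteRepository


class RemoteRepositoryFilter(django_filters.FilterSet):
    full_name = django_filters.CharFilter(
        field_name="full_name", lookup_expr="icontains"
    )

    class Meta:
        model = RemoteRepository
        fields = ["full_name"]
```

With this in place the dashboard can keep roughly the same query pattern it used against v2, e.g. `GET /api/v3/remote/repositories/?full_name=proj&expand=projects`.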
| 2023-07-20T14:58:39 |
|
readthedocs/readthedocs.org | 10,554 | readthedocs__readthedocs.org-10554 | [
"9360"
] | d525dacc8d9233621b4fdaeb31073934ae828967 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/builds/constants_docker.py
@@ -0,0 +1,64 @@
+"""
+Define constants here to allow import them without any external dependency.
+
+There are situations where we want to have access to these values without Django installed
+(e.g. common/dockerfiles/tasks.py)
+
+Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
+"""
+
+DOCKER_DEFAULT_IMAGE = "readthedocs/build"
+
+# Adding a new tool/version to this setting requires:
+#
+# - a mapping between the expected version in the config file, to the full
+# version installed via asdf (found via ``asdf list all <tool>``)
+#
+# - running the script ``./scripts/compile_version_upload.sh`` in
+# development and production environments to compile and cache the new
+# tool/version
+#
+# Note that when updating this options, you should also update the file:
+# readthedocs/rtd_tests/fixtures/spec/v2/schema.json
+RTD_DOCKER_BUILD_SETTINGS = {
+ # Mapping of build.os options to docker image.
+ "os": {
+ "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
+ "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
+ },
+ # Mapping of build.tools options to specific versions.
+ "tools": {
+ "python": {
+ "2.7": "2.7.18",
+ "3.6": "3.6.15",
+ "3.7": "3.7.17",
+ "3.8": "3.8.17",
+ "3.9": "3.9.17",
+ "3.10": "3.10.12",
+ "3.11": "3.11.4",
+ # Always point to the latest stable release.
+ "3": "3.11.4",
+ "miniconda3-4.7": "miniconda3-4.7.12",
+ "mambaforge-4.10": "mambaforge-4.10.3-10",
+ },
+ "nodejs": {
+ "14": "14.20.1",
+ "16": "16.18.1",
+ "18": "18.16.1", # LTS
+ "19": "19.0.1",
+ "20": "20.3.1",
+ },
+ "rust": {
+ "1.55": "1.55.0",
+ "1.61": "1.61.0",
+ "1.64": "1.64.0",
+ "1.70": "1.70.0",
+ },
+ "golang": {
+ "1.17": "1.17.13",
+ "1.18": "1.18.10",
+ "1.19": "1.19.10",
+ "1.20": "1.20.5",
+ },
+ },
+}
diff --git a/readthedocs/config/config.py b/readthedocs/config/config.py
--- a/readthedocs/config/config.py
+++ b/readthedocs/config/config.py
@@ -10,6 +10,7 @@
from django.conf import settings
+from readthedocs.builds import constants_docker
from readthedocs.config.utils import list_to_dict, to_dict
from readthedocs.core.utils.filesystem import safe_open
from readthedocs.projects.constants import GENERIC
@@ -353,7 +354,7 @@ def get_valid_python_versions_for_image(self, build_image):
"""
if build_image not in settings.DOCKER_IMAGE_SETTINGS:
build_image = '{}:{}'.format(
- settings.DOCKER_DEFAULT_IMAGE,
+ constants_docker.DOCKER_DEFAULT_IMAGE,
self.default_build_image,
)
return settings.DOCKER_IMAGE_SETTINGS[build_image]['python']['supported_versions']
@@ -375,7 +376,7 @@ def get_default_python_version_for_image(self, build_image, python_version):
"""
if build_image not in settings.DOCKER_IMAGE_SETTINGS:
build_image = '{}:{}'.format(
- settings.DOCKER_DEFAULT_IMAGE,
+ constants_docker.DOCKER_DEFAULT_IMAGE,
self.default_build_image,
)
return (
@@ -488,7 +489,7 @@ def validate_build(self):
if ':' not in build['image']:
# Prepend proper image name to user's image name
build['image'] = '{}:{}'.format(
- settings.DOCKER_DEFAULT_IMAGE,
+ constants_docker.DOCKER_DEFAULT_IMAGE,
build['image'],
)
# Update docker default settings from image name
@@ -873,7 +874,7 @@ def validate_old_build_config(self):
with self.catch_validation_error('build.image'):
image = self.pop_config('build.image', self.default_build_image)
build['image'] = '{}:{}'.format(
- settings.DOCKER_DEFAULT_IMAGE,
+ constants_docker.DOCKER_DEFAULT_IMAGE,
validate_choice(
image,
self.valid_build_images,
diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py
--- a/readthedocs/settings/base.py
+++ b/readthedocs/settings/base.py
@@ -12,7 +12,7 @@
from readthedocs.core.logs import shared_processors
from corsheaders.defaults import default_headers
from readthedocs.core.settings import Settings
-
+from readthedocs.builds import constants_docker
try:
import readthedocsext # noqa
@@ -558,10 +558,9 @@ def TEMPLATES(self):
RTD_DOCKER_COMPOSE = False
- DOCKER_DEFAULT_IMAGE = 'readthedocs/build'
DOCKER_VERSION = 'auto'
DOCKER_DEFAULT_VERSION = 'latest'
- DOCKER_IMAGE = '{}:{}'.format(DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION)
+ DOCKER_IMAGE = '{}:{}'.format(constants_docker.DOCKER_DEFAULT_IMAGE, DOCKER_DEFAULT_VERSION)
DOCKER_IMAGE_SETTINGS = {
# A large number of users still have this pinned in their config file.
# We must have documented it at some point.
@@ -620,60 +619,7 @@ def TEMPLATES(self):
})
# Additional binds for the build container
RTD_DOCKER_ADDITIONAL_BINDS = {}
-
- # Adding a new tool/version to this setting requires:
- #
- # - a mapping between the expected version in the config file, to the full
- # version installed via asdf (found via ``asdf list all <tool>``)
- #
- # - running the script ``./scripts/compile_version_upload.sh`` in
- # development and production environments to compile and cache the new
- # tool/version
- #
- # Note that when updating this options, you should also update the file:
- # readthedocs/rtd_tests/fixtures/spec/v2/schema.json
- RTD_DOCKER_BUILD_SETTINGS = {
- # Mapping of build.os options to docker image.
- 'os': {
- 'ubuntu-20.04': f'{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04',
- 'ubuntu-22.04': f'{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04',
- },
- # Mapping of build.tools options to specific versions.
- 'tools': {
- 'python': {
- '2.7': '2.7.18',
- '3.6': '3.6.15',
- '3.7': '3.7.17',
- '3.8': '3.8.17',
- '3.9': '3.9.17',
- '3.10': '3.10.12',
- '3.11': '3.11.4',
- 'miniconda3-4.7': 'miniconda3-4.7.12',
- 'mambaforge-4.10': 'mambaforge-4.10.3-10',
- },
- 'nodejs': {
- '14': '14.20.1',
- '16': '16.18.1',
- '18': '18.16.1', # LTS
- '19': '19.0.1',
- '20': '20.3.1',
- },
- 'rust': {
- '1.55': '1.55.0',
- '1.61': '1.61.0',
- '1.64': '1.64.0',
- '1.70': '1.70.0',
- },
- 'golang': {
- '1.17': '1.17.13',
- '1.18': '1.18.10',
- '1.19': '1.19.10',
- '1.20': '1.20.5',
- },
- },
- }
- # Always point to the latest stable release.
- RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3'] = RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.11']
+ RTD_DOCKER_BUILD_SETTINGS = constants_docker.RTD_DOCKER_BUILD_SETTINGS
# This is used for the image used to clone the users repo,
# since we can't read their config file image choice before cloning
RTD_DOCKER_CLONE_IMAGE = RTD_DOCKER_BUILD_SETTINGS["os"]["ubuntu-22.04"]
| Development: document how to pre-compile `build.tools`
When building locally using `build.tools`, the build will try to compile a Python version on each run. This is because the documentation does not explain how to pre-compile these tools and upload them to the S3 bucket.
We should probably document the usage of the `scripts/compile_version_upload_s3.sh` script in its own section. See https://github.com/readthedocs/readthedocs.org/blob/d6814fdebdf8778db7d3a5cbfd2f54947574ea52/scripts/compile_version_upload_s3.sh
| 2023-07-21T10:03:48 |
||
readthedocs/readthedocs.org | 10,560 | readthedocs__readthedocs.org-10560 | [
"9752"
] | 3eae8f51fc4bb670f89fb47abd5ce23b5cd95e9a | diff --git a/readthedocs/doc_builder/python_environments.py b/readthedocs/doc_builder/python_environments.py
--- a/readthedocs/doc_builder/python_environments.py
+++ b/readthedocs/doc_builder/python_environments.py
@@ -70,15 +70,15 @@ def install_package(self, install):
','.join(install.extra_requirements)
)
self.build_env.run(
- self.venv_bin(filename='python'),
- '-m',
- 'pip',
- 'install',
- '--upgrade',
- '--upgrade-strategy',
- 'eager',
- '--no-cache-dir',
- '{path}{extra_requirements}'.format(
+ self.venv_bin(filename="python"),
+ "-m",
+ "pip",
+ "install",
+ "--upgrade",
+ "--upgrade-strategy",
+ "only-if-needed",
+ "--no-cache-dir",
+ "{path}{extra_requirements}".format(
path=local_path,
extra_requirements=extra_req_param,
),
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -1509,7 +1509,7 @@ def test_python_install_pip(self, load_yaml_config):
"install",
"--upgrade",
"--upgrade-strategy",
- "eager",
+ "only-if-needed",
"--no-cache-dir",
".",
cwd=mock.ANY,
@@ -1550,7 +1550,7 @@ def test_python_install_pip_extras(self, load_yaml_config):
"install",
"--upgrade",
"--upgrade-strategy",
- "eager",
+ "only-if-needed",
"--no-cache-dir",
".[docs]",
cwd=mock.ANY,
@@ -1594,7 +1594,7 @@ def test_python_install_pip_several_options(self, load_yaml_config):
"install",
"--upgrade",
"--upgrade-strategy",
- "eager",
+ "only-if-needed",
"--no-cache-dir",
".[docs]",
cwd=mock.ANY,
| Build: `--upgrade-strategy eager` upgrades packages that were already pinned
## Details
* Read the Docs project URL: https://readthedocs.org/projects/qdax/
* Build URL (if applicable): https://readthedocs.org/projects/qdax/builds/18693487/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/felixchalumeau/
A recent PR from the QDax project has its Read the Docs CI stage [broken](https://github.com/adaptive-intelligent-robotics/QDax/pull/108), although there has been no core change in this PR. More generally speaking, all of our recent PRs have broken documentation, even though they branch from develop, which had a working CI.
Interestingly, I cannot reproduce the error when I try to serve the documentation on my local computer with `mkdocs serve` (everything is fine locally).
## Expected Result
I would expect the docs to build.
## Actual Result
As described in the details section of this issue, the docs do not build on Read the Docs even though no breaking change happened in the code, and they seem to build fine locally.
| Hi @felixchalumeau! Make sure you are using the exact same versions of Python packages locally as on the Read the Docs build environment. Take a look at https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
At first sight, I'd guess there is a version difference in `pytkdocs` since it's where the dictionary key is not found.
Hi @humitos
Thanks for your quick answer!
First, good news: I fixed my issue. It was not due to the docs requirements but to other requirements from the project.
Second, I wanted to ask you a question about the situation I encountered. My project is indeed using the good practices presented in the link you shared. But some uncertainty in the requirements used in the final build of the docs comes from the last step of the process on Read the Docs: `pip install --upgrade --upgrade-strategy eager --no-cache-dir .`, which overrides some dependencies from `requirements.txt` with those from `setup.py`.
This creates some uncertainty, because the good practice is to not pin the dependencies in `setup.py` files.
This is what happened in my case: one of my dependencies is pinned in my requirements but not in the setup, and was hence upgraded during the `pip install .`, which created the issue.
Do you think it would be possible/better to install the package while keeping the exact requirements given in the requirements files?
Hrm, yeah. I'm not sure why we are using `--upgrade-strategy eager`
> eager - all packages will be upgraded to the latest possible version. It should be noted here that pip’s current resolution algorithm isn’t even aware of packages other than those specified on the command line, and those identified as dependencies. This may or may not be true of the new resolver.
It's weird to me that we are telling `pip` to upgrade all the packages at this step, since the packages were already installed from pinned versions in the `requirements.txt` 🤔. I suppose we could review old issues and discussions about this to have a better answer here.
As a workaround, you could install your package manually by using `build.jobs.post_install` and remove the `python.install.method`:
```yaml
build:
jobs:
post_install:
- pip install .
```
_(read the docs at https://docs.readthedocs.io/en/latest/build-customization.html)_
However, I'd say that Read the Docs should not use the `eager` upgrade strategy, since it definitely brings unexpected situations like this one.
@felixchalumeau We have a lot of good experience with pip-tools. We use it to manage two versions of our requirements: the loosely pinned requirements and the exactly pinned requirements.
See more: https://github.com/jazzband/pip-tools
Running into this issue today specifically due to the need to pin some geospatial libraries (GDAL). Would be great to have some control over these install flags! | 2023-07-24T08:49:12 |
readthedocs/readthedocs.org | 10,572 | readthedocs__readthedocs.org-10572 | [
"10564"
] | 752cfeb64efed81b898d0d7ed5932629cb5d2996 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -40,6 +40,7 @@
"3": "3.11.4",
"miniconda3-4.7": "miniconda3-4.7.12",
"mambaforge-4.10": "mambaforge-4.10.3-10",
+ "mambaforge-22.9": "mambaforge-22.9.0-3",
},
"nodejs": {
"14": "14.20.1",
| diff --git a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
--- a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
+++ b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
@@ -177,7 +177,8 @@
"3.10",
"3.11",
"miniconda3-4.7",
- "mambaforge-4.10"
+ "mambaforge-4.10",
+ "mambaforge-22.9"
]
},
"nodejs": {
| Most recent available `mambaforge=4.10` is simply too old
Hello guys, just wanted to ask you if it's possible to have a more modern version available for `mambaforge`. The best and latest version that can be sourced on RTD via the configuration file is 4.10, which is simply too old (at most conda 4.10 and mamba 0.19). Updating to a modern mamba doesn't work, as you can see from me changing the conf file in https://github.com/ESMValGroup/ESMValTool/pull/3310/files with output in https://readthedocs.org/projects/esmvaltool/builds/21390633/ - mamba is stuck at 0.19.0, which, in turn, slows down the environment creation process to around 10 minutes. (For more recent condas, updating mamba to something like >=1.4.8 works very well, and updates conda to 23.3 or 23.4 too, but in this case the base versions are too old.) If you need any help whatsoever, I offer to help, and once more, many thanks for your great work on RTD :beer:
| Hi @valeriupredoi! You can update the Mamba version yourself by modifying this example we have in the documentation for Conda: https://docs.readthedocs.io/en/latest/build-customization.html#update-conda-version. Let me know if that works fine for your case.
hi @humitos many thanks for getting back to me :beer: I tried that example before opening this issue - it works in that it updates conda to the latest possible for that old base version, namely 4.7.12, but that's no good because it's still very old; please see the rerun I just did with that example: https://readthedocs.org/projects/esmvaltool/builds/21401372/ and the modded config file https://github.com/ESMValGroup/ESMValTool/pull/3310/files - the problem is that, with conda, after a while they stop allowing upgrades to the latest version if the base version is too old - that's why they keep releasing updated installers. I am not too fussed about conda (since we're not using it as a solver anymore); our problem is that mamba suffers from the same issue, i.e. it can't be updated if the base version is too old - https://readthedocs.org/projects/esmvaltool/builds/21401372/ - for some odd reason (something with RTD machines being very busy, or mamba got itself into an infinite loop) this hasn't finished; it would usually take about 700-800 seconds
(from the phone 📱)
What happens if you try that example but using Mamba instead of Conda? That's what I meant.
So, "build.tools.python: mambaforge...", and then "mamba update .... mamba"
Does that work for your use case?
that's a very good idea, cheers muchly - and indeed it works! My mamba (and subsequently conda) are updated to latest - https://readthedocs.org/projects/esmvaltool/builds/21404188/ but for some reason the steps are duplicated, any ideas why that'd be? See the config file too https://github.com/ESMValGroup/ESMValTool/pull/3310/files | 2023-07-26T11:00:42 |
readthedocs/readthedocs.org | 10,603 | readthedocs__readthedocs.org-10603 | [
"10392"
] | 531aaa84cf9f69ba718e5b4183fad9a62e705373 | diff --git a/readthedocs/projects/forms.py b/readthedocs/projects/forms.py
--- a/readthedocs/projects/forms.py
+++ b/readthedocs/projects/forms.py
@@ -86,7 +86,7 @@ class ProjectBasicsForm(ProjectForm):
class Meta:
model = Project
- fields = ("name", "repo", "default_branch")
+ fields = ("name", "repo", "default_branch", "language")
remote_repository = forms.IntegerField(
widget=forms.HiddenInput(),
@@ -94,13 +94,7 @@ class Meta:
)
def __init__(self, *args, **kwargs):
- show_advanced = kwargs.pop('show_advanced', False)
super().__init__(*args, **kwargs)
- if show_advanced:
- self.fields['advanced'] = forms.BooleanField(
- required=False,
- label=_('Edit advanced project options'),
- )
self.fields['repo'].widget.attrs['placeholder'] = self.placehold_repo()
self.fields['repo'].widget.attrs['required'] = True
diff --git a/readthedocs/projects/views/mixins.py b/readthedocs/projects/views/mixins.py
--- a/readthedocs/projects/views/mixins.py
+++ b/readthedocs/projects/views/mixins.py
@@ -100,12 +100,11 @@ class ProjectImportMixin:
"""Helpers to import a Project."""
- def finish_import_project(self, request, project, tags=None):
+ def finish_import_project(self, request, project):
"""
Perform last steps to import a project into Read the Docs.
- Add the user from request as maintainer
- - Set all the tags to the project
- Send Django Signal
- Trigger initial build
@@ -115,18 +114,11 @@ def finish_import_project(self, request, project, tags=None):
:param project: Project instance just imported (already saved)
:param tags: tags to add to the project
"""
- if not tags:
- tags = []
-
project.users.add(request.user)
- for tag in tags:
- project.tags.add(tag)
-
log.info(
'Project imported.',
project_slug=project.slug,
user_username=request.user.username,
- tags=tags,
)
# TODO: this signal could be removed, or used for sync task
diff --git a/readthedocs/projects/views/private.py b/readthedocs/projects/views/private.py
--- a/readthedocs/projects/views/private.py
+++ b/readthedocs/projects/views/private.py
@@ -54,7 +54,6 @@
ProjectAdvertisingForm,
ProjectBasicsForm,
ProjectConfigForm,
- ProjectExtraForm,
ProjectRelationshipForm,
RedirectForm,
TranslationForm,
@@ -97,7 +96,6 @@ class ProjectDashboard(PrivateViewMixin, ListView):
model = Project
template_name = 'projects/project_dashboard.html'
- # pylint: disable=arguments-differ
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Set the default search to search files instead of projects
@@ -134,7 +132,7 @@ def get_queryset(self):
def get(self, request, *args, **kwargs):
self.validate_primary_email(request.user)
- return super(ProjectDashboard, self).get(self, request, *args, **kwargs)
+ return super().get(self, request, *args, **kwargs)
class ProjectMixin(PrivateViewMixin):
@@ -263,9 +261,7 @@ class ImportWizardView(ProjectImportMixin, PrivateViewMixin, SessionWizardView):
form_list = [
("basics", ProjectBasicsForm),
("config", ProjectConfigForm),
- ("extra", ProjectExtraForm),
]
- condition_dict = {'extra': lambda self: self.is_advanced()}
initial_dict_key = 'initial-data'
@@ -283,7 +279,7 @@ def _set_initial_dict(self):
else:
self.initial_dict = self.storage.data.get(self.initial_dict_key, {})
- def post(self, *args, **kwargs): # pylint: disable=arguments-differ
+ def post(self, *args, **kwargs):
self._set_initial_dict()
log.bind(user_username=self.request.user.username)
@@ -299,8 +295,6 @@ def get_form_kwargs(self, step=None):
"""Get args to pass into form instantiation."""
kwargs = {}
kwargs['user'] = self.request.user
- if step == 'basics':
- kwargs['show_advanced'] = True
return kwargs
def get_template_names(self):
@@ -315,32 +309,17 @@ def done(self, form_list, **kwargs):
other side effects for now, by signalling a save without commit. Then,
finish by added the members to the project and saving.
"""
- form_data = self.get_all_cleaned_data()
- extra_fields = ProjectExtraForm.Meta.fields
basics_form = form_list[0]
# Save the basics form to create the project instance, then alter
# attributes directly from other forms
project = basics_form.save()
- # Remove tags to avoid setting them in raw instead of using ``.add``
- tags = form_data.pop('tags', [])
-
- for field, value in list(form_data.items()):
- if field in extra_fields:
- setattr(project, field, value)
- project.save()
-
- self.finish_import_project(self.request, project, tags)
+ self.finish_import_project(self.request, project)
return HttpResponseRedirect(
reverse('projects_detail', args=[project.slug]),
)
- def is_advanced(self):
- """Determine if the user selected the `show advanced` field."""
- data = self.get_cleaned_data_for_step('basics') or {}
- return data.get('advanced', True)
-
class ImportView(PrivateViewMixin, TemplateView):
@@ -931,7 +910,6 @@ class IntegrationWebhookSync(IntegrationMixin, GenericView):
"""
def post(self, request, *args, **kwargs):
- # pylint: disable=unused-argument
if 'integration_pk' in kwargs:
integration = self.get_integration()
update_webhook(self.get_project(), integration, request=request)
| diff --git a/readthedocs/rtd_tests/tests/test_project_forms.py b/readthedocs/rtd_tests/tests/test_project_forms.py
--- a/readthedocs/rtd_tests/tests/test_project_forms.py
+++ b/readthedocs/rtd_tests/tests/test_project_forms.py
@@ -74,9 +74,10 @@ def test_import_repo_url(self):
with override_settings(ALLOW_PRIVATE_REPOS=False):
for url, valid in public_urls:
initial = {
- 'name': 'foo',
- 'repo_type': 'git',
- 'repo': url,
+ "name": "foo",
+ "repo_type": "git",
+ "repo": url,
+ "language": "en",
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
@@ -84,18 +85,20 @@ def test_import_repo_url(self):
with override_settings(ALLOW_PRIVATE_REPOS=True):
for url, valid in private_urls:
initial = {
- 'name': 'foo',
- 'repo_type': 'git',
- 'repo': url,
+ "name": "foo",
+ "repo_type": "git",
+ "repo": url,
+ "language": "en",
}
form = ProjectBasicsForm(initial)
self.assertEqual(form.is_valid(), valid, msg=url)
def test_empty_slug(self):
initial = {
- 'name': "''",
- 'repo_type': 'git',
- 'repo': 'https://github.com/user/repository',
+ "name": "''",
+ "repo_type": "git",
+ "repo": "https://github.com/user/repository",
+ "language": "en",
}
form = ProjectBasicsForm(initial)
self.assertFalse(form.is_valid())
@@ -112,9 +115,10 @@ def test_changing_vcs_should_not_change_latest_is_not_none(self):
form = ProjectBasicsForm(
{
- 'repo': 'http://github.com/test/test',
- 'name': 'name',
- 'repo_type': REPO_TYPE_GIT,
+ "repo": "http://github.com/test/test",
+ "name": "name",
+ "repo_type": REPO_TYPE_GIT,
+ "language": "en",
},
instance=project,
)
@@ -144,11 +148,14 @@ def test_length_of_tags(self):
self.assertDictEqual(form.errors, {'tags': [error_msg]})
def test_strip_repo_url(self):
- form = ProjectBasicsForm({
- 'name': 'foo',
- 'repo_type': 'git',
- 'repo': 'https://github.com/rtfd/readthedocs.org/'
- })
+ form = ProjectBasicsForm(
+ {
+ "name": "foo",
+ "repo_type": "git",
+ "repo": "https://github.com/rtfd/readthedocs.org/",
+ "language": "en",
+ }
+ )
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data['repo'],
diff --git a/readthedocs/rtd_tests/tests/test_project_views.py b/readthedocs/rtd_tests/tests/test_project_views.py
--- a/readthedocs/rtd_tests/tests/test_project_views.py
+++ b/readthedocs/rtd_tests/tests/test_project_views.py
@@ -69,10 +69,11 @@ class TestBasicsForm(WizardTestCase):
def setUp(self):
self.user = get(User)
- self.step_data['basics'] = {
- 'name': 'foobar',
- 'repo': 'http://example.com/foobar',
- 'repo_type': 'git',
+ self.step_data["basics"] = {
+ "name": "foobar",
+ "repo": "http://example.com/foobar",
+ "repo_type": "git",
+ "language": "en",
}
self.step_data["config"] = {
"confirm": True,
@@ -199,10 +200,6 @@ def setUp(self):
}
def test_initial_params(self):
- extra_initial = {
- 'description': 'An amazing project',
- 'project_url': "https://foo.bar",
- }
config_initial = {
"confirm": True,
}
@@ -213,7 +210,7 @@ def test_initial_params(self):
'default_branch': 'main',
'remote_repository': '',
}
- initial = dict(**extra_initial, **config_initial, **basic_initial)
+ initial = dict(**config_initial, **basic_initial)
self.client.force_login(self.user)
# User selects a remote repo to import.
@@ -223,61 +220,17 @@ def test_initial_params(self):
form = resp.context_data['form']
self.assertEqual(form.initial, basic_initial)
- # User selects advanced.
- basic_initial['advanced'] = True
- step_data = {
- f'basics-{k}': v
- for k, v in basic_initial.items()
- }
- step_data[f'{self.wizard_class_slug}-current_step'] = 'basics'
- resp = self.client.post(self.url, step_data)
-
- step_data = {f"config-{k}": v for k, v in config_initial.items()}
- step_data[f"{self.wizard_class_slug}-current_step"] = "config"
- resp = self.client.post(self.url, step_data)
-
- # The correct initial data for the advanced form is set.
- form = resp.context_data['form']
- self.assertEqual(form.initial, extra_initial)
-
def test_form_pass(self):
"""Test all forms pass validation."""
resp = self.post_step("basics")
self.assertWizardResponse(resp, "config")
resp = self.post_step("config", session=list(resp._request.session.items()))
- self.assertWizardResponse(resp, "extra")
- self.assertEqual(resp.status_code, 200)
- resp = self.post_step('extra', session=list(resp._request.session.items()))
self.assertIsInstance(resp, HttpResponseRedirect)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['location'], '/projects/foobar/')
proj = Project.objects.get(name='foobar')
self.assertIsNotNone(proj)
- data = self.step_data['basics']
- del data['advanced']
- del self.step_data['extra']['tags']
- self.assertCountEqual(
- [tag.name for tag in proj.tags.all()],
- ['bar', 'baz', 'foo'],
- )
- data.update(self.step_data['extra'])
- for (key, val) in list(data.items()):
- self.assertEqual(getattr(proj, key), val)
-
- def test_form_missing_extra(self):
- """Submit extra form with missing data, expect to get failures."""
- # Remove extra data to trigger validation errors
- self.step_data['extra'] = {}
-
- resp = self.post_step("basics")
- self.assertWizardResponse(resp, "config")
- resp = self.post_step("config", session=list(resp._request.session.items()))
- self.assertWizardResponse(resp, "extra")
- resp = self.post_step("extra", session=list(resp._request.session.items()))
-
- self.assertWizardFailure(resp, 'language')
- self.assertWizardFailure(resp, 'documentation_type')
def test_remote_repository_is_added(self):
remote_repo = get(RemoteRepository, default_branch="default-branch")
@@ -292,8 +245,6 @@ def test_remote_repository_is_added(self):
resp = self.post_step("basics")
self.assertWizardResponse(resp, "config")
resp = self.post_step("config", session=list(resp._request.session.items()))
- self.assertWizardResponse(resp, "extra")
- resp = self.post_step("extra", session=list(resp._request.session.items()))
self.assertIsInstance(resp, HttpResponseRedirect)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['location'], '/projects/foobar/')
| Project import: "Edit Advanced Options"
In #10356, we added a new step about the configuration file. It prompts the user to add a `.readthedocs.yaml` configuration file before continuing.
This means that "Edit Advanced Options" in the previous step is dangling a bit. I think that adding a configuration step takes the users focus away from their previous "Edit Advanced Options" choice.

I think there are 2 possible choices here:
1) Move the checkbox option to the configuration step.
2) Remove it entirely :100: After adding a configuration file, the most important thing for all users is to see that a build is running and get a failure/warning when a .readthedocs.yaml wasn't found or is invalid. Also, the previous use case of visiting "Advanced Options" largely does the same as what the config file offers!
CC: @agjohnson @humitos
Related: https://github.com/readthedocs/readthedocs.org/issues/10342
| Note that the "Advanced options" are completely useless actually:

I'd vote to completely remove this step and move the "language" field to the initial step, which is the only important one (tied to the URL of the project). Those other "Extra details" (as the title says), should be found by the user later, not at import time, IMO.
> I'd vote to completely remove this step and move the "language" field to the initial step
It's a great idea to let the user choose the language in the initial import step. Is that the step you mean?
I :100: agree that we should completely remove the "Edit advanced project options" checkbox. | 2023-08-06T12:07:15 |
readthedocs/readthedocs.org | 10,610 | readthedocs__readthedocs.org-10610 | [
"10404"
] | df31c79521c47ad48aa96de247cb4f6b26dbd071 | diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py
--- a/readthedocs/profiles/views.py
+++ b/readthedocs/profiles/views.py
@@ -61,8 +61,7 @@ def get_object(self):
def get_success_url(self):
return reverse(
- 'profiles_profile_detail',
- kwargs={'username': self.request.user.username},
+ "profiles_profile_edit",
)
| diff --git a/readthedocs/rtd_tests/tests/test_profile_views.py b/readthedocs/rtd_tests/tests/test_profile_views.py
--- a/readthedocs/rtd_tests/tests/test_profile_views.py
+++ b/readthedocs/rtd_tests/tests/test_profile_views.py
@@ -36,6 +36,7 @@ def test_edit_profile(self):
},
)
self.assertTrue(resp.status_code, 200)
+ self.assertEqual(resp["Location"], "/accounts/edit/")
self.user.refresh_from_db()
self.user.profile.refresh_from_db()
| Change profile edit form success page
Currently, when a user saves the profile edit form, the success page is not the profile form page, the user gets redirected to the profile public view page. This is quite confusing UX but might be baked into Allauth. I would expect this end up on the profile edit form page instead.
| Moving this off of the dashboard issue tracker, it's an application issue | 2023-08-08T12:50:48 |
readthedocs/readthedocs.org | 10,656 | readthedocs__readthedocs.org-10656 | [
"10411"
] | 13968498732691467a83233827a700f82d5a12e2 | diff --git a/readthedocs/projects/forms.py b/readthedocs/projects/forms.py
--- a/readthedocs/projects/forms.py
+++ b/readthedocs/projects/forms.py
@@ -1,4 +1,5 @@
"""Project forms."""
+
import json
from random import choice
from re import fullmatch
@@ -97,6 +98,109 @@ class ProjectBackendForm(forms.Form):
backend = forms.CharField()
+class ProjectPRBuildsMixin(PrevalidatedForm):
+
+ """
+ Mixin that provides a method to setup the external builds option.
+
+ TODO: Remove this once we migrate to the new dashboard,
+ and we don't need to support the old project settings form.
+ """
+
+ def has_supported_integration(self, integrations):
+ supported_types = {Integration.GITHUB_WEBHOOK, Integration.GITLAB_WEBHOOK}
+ for integration in integrations:
+ if integration.integration_type in supported_types:
+ return True
+ return False
+
+ def can_build_external_versions(self, integrations):
+ """
+ Check if external versions can be enabled for this project.
+
+ A project can build external versions if:
+
+ - They are using GitHub or GitLab.
+ - The repository's webhook is setup to send pull request events.
+
+ If the integration's provider data isn't set,
+ it could mean that the user created the integration manually,
+ and doesn't have an account connected.
+ So we don't know for sure if the webhook is sending pull request events.
+ """
+ for integration in integrations:
+ provider_data = integration.provider_data
+ if integration.integration_type == Integration.GITHUB_WEBHOOK and (
+ not provider_data or "pull_request" in provider_data.get("events", [])
+ ):
+ return True
+ if integration.integration_type == Integration.GITLAB_WEBHOOK and (
+ not provider_data or provider_data.get("merge_requests_events")
+ ):
+ return True
+ return False
+
+ def setup_external_builds_option(self):
+ """Disable the external builds option if the project doesn't meet the requirements."""
+ if (
+ settings.ALLOW_PRIVATE_REPOS
+ and self.instance.remote_repository
+ and not self.instance.remote_repository.private
+ ):
+ self.fields["external_builds_privacy_level"].disabled = True
+ # TODO use a proper error/warning instead of help text for error states
+ help_text = _(
+ "We have detected that this project is public, pull request previews are set to public."
+ )
+ self.fields["external_builds_privacy_level"].help_text = help_text
+
+ def clean_prevalidation(self):
+ """Disable the external builds option if the project doesn't meet the requirements."""
+ integrations = list(self.instance.integrations.all())
+ has_supported_integration = self.has_supported_integration(integrations)
+ can_build_external_versions = self.can_build_external_versions(integrations)
+
+ # External builds are supported for this project,
+ # don't disable the option.
+ if has_supported_integration and can_build_external_versions:
+ return
+
+ msg = None
+ url = reverse("projects_integrations", args=[self.instance.slug])
+
+ if not has_supported_integration:
+ msg = _(
+ "To build from pull requests you need a "
+ f'GitHub or GitLab <a href="{url}">integration</a>.'
+ )
+
+ if has_supported_integration and not can_build_external_versions:
+ # If there is only one integration, link directly to it.
+ if len(integrations) == 1:
+ url = reverse(
+ "projects_integrations_detail",
+ args=[self.instance.slug, integrations[0].pk],
+ )
+ msg = _(
+ "To build from pull requests your repository's webhook "
+ "needs to send pull request events. "
+ f'Try to <a href="{url}">resync your integration</a>.'
+ )
+
+ if msg:
+ # TODO use a proper error/warning instead of help text for error states
+ field = self.fields["external_builds_enabled"]
+ field.disabled = True
+ field.help_text = f"{msg} {field.help_text}"
+ # Don't raise an error on the Update form,
+ # to keep backwards compat
+ if not self.fields.get("name"):
+ raise RichValidationError(
+ msg,
+ header=_("Pull request builds not supported"),
+ )
+
+
class ProjectFormPrevalidateMixin:
"""Provides shared logic between the automatic and manual create forms."""
@@ -292,6 +396,7 @@ def __init__(self, *args, **kwargs):
class UpdateProjectForm(
ProjectTriggerBuildMixin,
ProjectBasicsForm,
+ ProjectPRBuildsMixin,
):
"""Main project settings form."""
@@ -383,83 +488,6 @@ def __init__(self, *args, **kwargs):
self.setup_external_builds_option()
- def setup_external_builds_option(self):
- """Disable the external builds option if the project doesn't meet the requirements."""
- if (
- settings.ALLOW_PRIVATE_REPOS
- and self.instance.remote_repository
- and not self.instance.remote_repository.private
- ):
- self.fields["external_builds_privacy_level"].disabled = True
- help_text = _(
- "We have detected that this project is public, pull request previews are set to public."
- )
- self.fields["external_builds_privacy_level"].help_text = help_text
-
- integrations = list(self.instance.integrations.all())
- has_supported_integration = self.has_supported_integration(integrations)
- can_build_external_versions = self.can_build_external_versions(integrations)
-
- # External builds are supported for this project,
- # don't disable the option.
- if has_supported_integration and can_build_external_versions:
- return
-
- msg = None
- url = reverse("projects_integrations", args=[self.instance.slug])
- if not has_supported_integration:
- msg = _(
- f'To build from pull requests you need a GitHub or GitLab <a href="{url}">integration</a>.'
- )
- if has_supported_integration and not can_build_external_versions:
- # If there is only one integration, link directly to it.
- if len(integrations) == 1:
- url = reverse(
- "projects_integrations_detail",
- args=[self.instance.slug, integrations[0].pk],
- )
- msg = _(
- f'To build from pull requests your repository\'s webhook needs to send pull request events. Try to <a href="{url}">resync your integration</a>.'
- )
-
- if msg:
- field = self.fields["external_builds_enabled"]
- field.disabled = True
- field.help_text = f"{msg} {field.help_text}"
-
- def has_supported_integration(self, integrations):
- supported_types = {Integration.GITHUB_WEBHOOK, Integration.GITLAB_WEBHOOK}
- for integration in integrations:
- if integration.integration_type in supported_types:
- return True
- return False
-
- def can_build_external_versions(self, integrations):
- """
- Check if external versions can be enabled for this project.
-
- A project can build external versions if:
-
- - They are using GitHub or GitLab.
- - The repository's webhook is setup to send pull request events.
-
- If the integration's provider data isn't set,
- it could mean that the user created the integration manually,
- and doesn't have an account connected.
- So we don't know for sure if the webhook is sending pull request events.
- """
- for integration in integrations:
- provider_data = integration.provider_data
- if integration.integration_type == Integration.GITHUB_WEBHOOK and (
- not provider_data or "pull_request" in provider_data.get("events", [])
- ):
- return True
- if integration.integration_type == Integration.GITLAB_WEBHOOK and (
- not provider_data or provider_data.get("merge_requests_events")
- ):
- return True
- return False
-
def clean_readthedocs_yaml_path(self):
"""
Validate user input to help user.
@@ -571,6 +599,24 @@ def clean_alias(self):
return alias
+class ProjectPullRequestForm(forms.ModelForm, ProjectPRBuildsMixin):
+
+ """Project pull requests configuration form."""
+
+ class Meta:
+ model = Project
+ fields = ["external_builds_enabled", "external_builds_privacy_level"]
+
+ def __init__(self, *args, **kwargs):
+ self.project = kwargs.pop("project", None)
+ super().__init__(*args, **kwargs)
+
+ self.setup_external_builds_option()
+
+ if not settings.ALLOW_PRIVATE_REPOS:
+ self.fields.pop("external_builds_privacy_level")
+
+
class AddonsConfigForm(forms.ModelForm):
"""Form to opt-in into new beta addons."""
diff --git a/readthedocs/projects/urls/private.py b/readthedocs/projects/urls/private.py
--- a/readthedocs/projects/urls/private.py
+++ b/readthedocs/projects/urls/private.py
@@ -33,6 +33,7 @@
ProjectEmailNotificationsCreate,
ProjectNotifications,
ProjectNotificationsDelete,
+ ProjectPullRequestsUpdate,
ProjectRedirectsCreate,
ProjectRedirectsDelete,
ProjectRedirectsInsert,
@@ -181,6 +182,11 @@
ProjectAdvertisingUpdate.as_view(),
name="projects_advertising",
),
+ re_path(
+ r"^(?P<project_slug>[-\w]+)/pull-requests/$",
+ ProjectPullRequestsUpdate.as_view(),
+ name="projects_pull_requests",
+ ),
re_path(
r"^(?P<project_slug>[-\w]+)/search-analytics/$",
SearchAnalytics.as_view(),
diff --git a/readthedocs/projects/views/private.py b/readthedocs/projects/views/private.py
--- a/readthedocs/projects/views/private.py
+++ b/readthedocs/projects/views/private.py
@@ -1,4 +1,5 @@
"""Project views for authenticated users."""
+
import structlog
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
@@ -60,6 +61,7 @@
ProjectBasicsForm,
ProjectConfigForm,
ProjectManualForm,
+ ProjectPullRequestForm,
ProjectRelationshipForm,
RedirectForm,
TranslationForm,
@@ -1212,3 +1214,18 @@ def _get_csv_data(self):
def _get_feature(self, project):
return get_feature(project, feature_type=self.feature_type)
+
+
+class ProjectPullRequestsUpdate(PrivateViewMixin, UpdateView):
+ model = Project
+ form_class = ProjectPullRequestForm
+ success_message = _("Pull request settings have been updated")
+ template_name = "projects/pull_requests_form.html"
+ lookup_url_kwarg = "project_slug"
+ lookup_field = "slug"
+
+ def get_queryset(self):
+ return self.model.objects.for_admin_user(self.request.user)
+
+ def get_success_url(self):
+ return reverse("projects_pull_requests", args=[self.object.slug])
| Dashboard: Move "Build pull requests for this project"
## What's the problem this feature will solve?
I couldn't find an open issue for this. And that's maybe why it hasn't been done? :)
"Build pull requests for this project" should be much easier to spot. Having it nested way down on "Advanced Settings" seems to be problematic. I noticed it while writing documentation: We refer to this feature _a lot_, so it seems counter-intuitive that it's so hidden.
Even on a larger display, it's barely visible:

## Describe the solution you'd like
Move "Build pull requests for this project" to the main settings page, preferably in relation to the repository type.

## Alternative solutions
I considered whether this issue could be solved exclusively in the new dashboard templates, but I believe it is related to Django forms and needs to be solved for both the new and the old dashboard in one go.
| 2023-08-22T20:55:47 |
||
readthedocs/readthedocs.org | 10,668 | readthedocs__readthedocs.org-10668 | [
"9356"
] | f999d65cbef8f3704d4f86f05c49ed81884455d9 | diff --git a/readthedocs/notifications/views.py b/readthedocs/notifications/views.py
--- a/readthedocs/notifications/views.py
+++ b/readthedocs/notifications/views.py
@@ -1,5 +1,5 @@
"""Django views for the notifications app."""
-from django.contrib import admin, messages
+from django.contrib import messages
from django.http import HttpResponseRedirect
from django.views.generic import FormView
@@ -42,9 +42,7 @@ def get_form_kwargs(self):
def get_initial(self):
"""Add selected ids to initial form data."""
initial = super().get_initial()
- initial["_selected_action"] = self.request.POST.getlist(
- admin.ACTION_CHECKBOX_NAME,
- )
+ initial["_selected_action"] = self.request.POST.getlist("_selected_action")
return initial
def form_valid(self, form):
| Django: adapt admin code for 3.x
It seems that we missed an upgrade to make the code fully compatible with Django 3.x.
We are still using `admin.ACTION_CHECKBOX_NAME`, which was deprecated and has since been removed:
> The compatibility import of django.contrib.admin.helpers.ACTION_CHECKBOX_NAME in django.contrib.admin is removed.
(from https://docs.djangoproject.com/en/4.0/releases/3.1/#id1)
The code lives at https://github.com/readthedocs/readthedocs.org/blob/e94c26074e9abdf7056b4e6502c52f8a6b128055/readthedocs/notifications/views.py#L48
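For illustration, a minimal sketch of the replacement; the constant still exists, but only under `django.contrib.admin.helpers`, and its value is simply the string `"_selected_action"` (which is what the patch above hardcodes). `request` here stands for whatever request object is in scope:
```python
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME

# Before (only works on Django < 3.1):
#   from django.contrib import admin
#   selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)

# After: import the constant from its new home...
selected = request.POST.getlist(ACTION_CHECKBOX_NAME)

# ...or use the literal value directly, as the patch above does:
selected = request.POST.getlist("_selected_action")
```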
| 2023-08-26T20:18:56 |
||
readthedocs/readthedocs.org | 10,686 | readthedocs__readthedocs.org-10686 | [
"10680"
] | b13a321ba29d6b8e9651c7f42c2d23585333600b | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -174,12 +174,21 @@ def _v0(self, project, version, build, filename):
It tries to follow some similarity with the APIv3 for already-known resources
(Project, Version, Build, etc).
"""
- versions_active_built_not_hidden = (
- Version.internal.public(project=project, only_active=True, only_built=True)
- .exclude(hidden=True)
- .only("slug")
- .order_by("slug")
- )
+ version_downloads = []
+ versions_active_built_not_hidden = Version.objects.none()
+
+ if not project.single_version:
+ versions_active_built_not_hidden = (
+ Version.internal.public(
+ project=project, only_active=True, only_built=True
+ )
+ .exclude(hidden=True)
+ .only("slug")
+ .order_by("slug")
+ )
+ if version:
+ version_downloads = version.get_downloads(pretty=True).items()
+
project_translations = (
project.translations.all().only("language").order_by("language")
)
@@ -283,7 +292,7 @@ def _v0(self, project, version, build, filename):
"name": name,
"url": url,
}
- for name, url in version.get_downloads(pretty=True).items()
+ for name, url in version_downloads
],
# TODO: find a way to get this data in a reliably way.
# We don't have a simple way to map a URL to a file in the repository.
| Addons: don't return any version when project is "single version"
If you go to https://pycamp.es/ you will see that the flyout shows two versions: latest and 2022. However, this project is marked as "Single version", and clicking on either of them leads to a 404 URL.
https://github.com/readthedocs/readthedocs.org/blob/6816afe5d88bb3986a0ff4a4381f2ccf4cd7c7ea/readthedocs/proxito/views/hosting.py#L272-L279
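The shape of the fix (mirroring the patch above) is to skip the version listing entirely for single-version projects; a rough sketch:
```python
# Sketch: a "single version" project should not offer any versions in the flyout.
versions_active_built_not_hidden = Version.objects.none()

if not project.single_version:
    versions_active_built_not_hidden = (
        Version.internal.public(project=project, only_active=True, only_built=True)
        .exclude(hidden=True)
        .order_by("slug")
    )
```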
| 2023-08-30T10:11:34 |
||
readthedocs/readthedocs.org | 10,704 | readthedocs__readthedocs.org-10704 | [
"10694"
] | 263185ea5e4c772c8d328a2fca62c93f209d5702 | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -1,21 +1,26 @@
"""Views for hosting features."""
+from functools import lru_cache
+
import packaging
import structlog
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import Http404, JsonResponse
-from django.views import View
+from rest_framework.renderers import JSONRenderer
+from rest_framework.views import APIView
+from readthedocs.api.mixins import CDNCacheTagsMixin
+from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.api.v3.serializers import (
BuildSerializer,
ProjectSerializer,
VersionSerializer,
)
from readthedocs.builds.models import Version
-from readthedocs.core.mixins import CDNCacheControlMixin
from readthedocs.core.resolver import resolver
from readthedocs.core.unresolver import UnresolverError, unresolver
+from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.projects.models import Feature
log = structlog.get_logger(__name__) # noqa
@@ -35,7 +40,7 @@ class ClientError(Exception):
)
-class ReadTheDocsConfigJson(CDNCacheControlMixin, View):
+class BaseReadTheDocsConfigJson(CDNCacheTagsMixin, APIView):
"""
API response consumed by our JavaScript client.
@@ -49,8 +54,52 @@ class ReadTheDocsConfigJson(CDNCacheControlMixin, View):
(e.g. ``window.location.href``)
"""
- def get(self, request):
+ http_method_names = ["get"]
+ permission_classes = [IsAuthorizedToViewVersion]
+ renderer_classes = [JSONRenderer]
+ project_cache_tag = "rtd-addons"
+
+ @lru_cache(maxsize=1)
+ def _resolve_resources(self):
+ url = self.request.GET.get("url")
+ if not url:
+ # TODO: not sure what to return here when it fails on the `has_permission`
+ return None, None, None, None
+
+ unresolved_domain = self.request.unresolved_domain
+ project = unresolved_domain.project
+
+ try:
+ unresolved_url = unresolver.unresolve_url(url)
+ version = unresolved_url.version
+ filename = unresolved_url.filename
+ build = version.builds.last()
+
+ except UnresolverError as exc:
+ # If an exception is raised and there is a ``project`` in the
+ # exception, it's a partial match. This could be because of an
+ # invalid URL path, but on a valid project domain. In this case, we
+ # continue with the ``project``, but without a ``version``.
+ # Otherwise, we return 404 NOT FOUND.
+ project = getattr(exc, "project", None)
+ if not project:
+ raise Http404() from exc
+
+ version = None
+ filename = None
+ build = None
+
+ return project, version, build, filename
+ def _get_project(self):
+ project, version, build, filename = self._resolve_resources()
+ return project
+
+ def _get_version(self):
+ project, version, build, filename = self._resolve_resources()
+ return version
+
+ def get(self, request, format=None):
url = request.GET.get("url")
if not url:
return JsonResponse(
@@ -83,30 +132,16 @@ def get(self, request):
status=400,
)
- unresolved_domain = request.unresolved_domain
- project = unresolved_domain.project
-
- try:
- unresolved_url = unresolver.unresolve_url(url)
- version = unresolved_url.version
- filename = unresolved_url.filename
- build = version.builds.last()
-
- except UnresolverError as exc:
- # If an exception is raised and there is a ``project`` in the
- # exception, it's a partial match. This could be because of an
- # invalid URL path, but on a valid project domain. In this case, we
- # continue with the ``project``, but without a ``version``.
- # Otherwise, we return 404 NOT FOUND.
- project = getattr(exc, "project", None)
- if not project:
- raise Http404() from exc
-
- version = None
- filename = None
- build = None
+ project, version, build, filename = self._resolve_resources()
- data = AddonsResponse().get(addons_version, project, version, build, filename)
+ data = AddonsResponse().get(
+ addons_version,
+ project,
+ version,
+ build,
+ filename,
+ user=request.user,
+ )
return JsonResponse(data, json_dumps_params={"indent": 4, "sort_keys": True})
@@ -149,7 +184,15 @@ class BuildSerializerNoLinks(NoLinksMixin, BuildSerializer):
class AddonsResponse:
- def get(self, addons_version, project, version=None, build=None, filename=None):
+ def get(
+ self,
+ addons_version,
+ project,
+ version=None,
+ build=None,
+ filename=None,
+ user=None,
+ ):
"""
Unique entry point to get the proper API response.
@@ -157,12 +200,12 @@ def get(self, addons_version, project, version=None, build=None, filename=None):
best JSON structure for that particular version.
"""
if addons_version.major == 0:
- return self._v0(project, version, build, filename)
+ return self._v0(project, version, build, filename, user)
if addons_version.major == 1:
- return self._v1(project, version, build, filename)
+ return self._v1(project, version, build, filename, user)
- def _v0(self, project, version, build, filename):
+ def _v0(self, project, version, build, filename, user):
"""
Initial JSON data structure consumed by the JavaScript client.
@@ -180,7 +223,10 @@ def _v0(self, project, version, build, filename):
if not project.single_version:
versions_active_built_not_hidden = (
Version.internal.public(
- project=project, only_active=True, only_built=True
+ project=project,
+ only_active=True,
+ only_built=True,
+ user=user,
)
.exclude(hidden=True)
.only("slug")
@@ -369,7 +415,11 @@ def _v0(self, project, version, build, filename):
return data
- def _v1(self, project, version, build, filename):
+ def _v1(self, project, version, build, filename, user):
return {
"comment": "Undefined yet. Use v0 for now",
}
+
+
+class ReadTheDocsConfigJson(SettingsOverrideObject):
+ _default_class = BaseReadTheDocsConfigJson
| Addons: purge `/addons/?url=` when changing data
We are currently caching the `/addons/?url=` endpoint. This is great, but there are situations where we need to invalidate the cache:
* changing any field of the version
* new versions activated
* EthicalAds, due to `ad_free` changes
* new successful build for a particular version (changes `builds.current` from the API response)
* changing any field for the project
When any of these things changes, we should be able to trigger a cache invalidation via tags like:
* `project:<slug> version:<slug> addons` to invalidate a specific version
* `project:<slug> addons` to invalidate it for the whole project
I think there may be more things to consider here, but I wanted to at least open an issue at this point to track this and keep it in mind.
* Related https://github.com/readthedocs/readthedocs.org/issues/10536
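For reference, purging by cache tag on Cloudflare (which fronts these responses) is a single API call. This is a hypothetical sketch; the tag names and the helper itself are illustrative, not the actual Read the Docs purge code:
```python
import requests


def purge_addons_cache(zone_id, api_token, project_slug, version_slug=None):
    """Hypothetical helper: purge cached ``/_/addons/`` responses by Cache-Tag."""
    # Illustrative tag naming only; responses would need to be served with a
    # matching ``Cache-Tag`` header for this to have any effect.
    if version_slug:
        tags = [f"{project_slug}:{version_slug}:addons"]
    else:
        tags = [f"{project_slug}:addons"]

    response = requests.post(
        f"https://api.cloudflare.com/client/v4/zones/{zone_id}/purge_cache",
        headers={"Authorization": f"Bearer {api_token}"},
        json={"tags": tags},  # purging by tag is a Cloudflare Enterprise feature
        timeout=10,
    )
    response.raise_for_status()
```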
| 2023-09-04T10:37:50 |
||
readthedocs/readthedocs.org | 10,754 | readthedocs__readthedocs.org-10754 | [
"10685"
] | 38bcc2c7901e610917f143909f94631709fa7f73 | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -233,8 +233,9 @@ def _v0(self, project, version, build, filename, user):
.only("slug")
.order_by("slug")
)
- if version:
- version_downloads = version.get_downloads(pretty=True).items()
+
+ if version:
+ version_downloads = version.get_downloads(pretty=True).items()
project_translations = (
project.translations.all().only("language").order_by("language")
| diff --git a/readthedocs/proxito/tests/test_headers.py b/readthedocs/proxito/tests/test_headers.py
--- a/readthedocs/proxito/tests/test_headers.py
+++ b/readthedocs/proxito/tests/test_headers.py
@@ -12,7 +12,7 @@
from readthedocs.builds.models import Version
from readthedocs.organizations.models import Organization
from readthedocs.projects.constants import PRIVATE, PUBLIC
-from readthedocs.projects.models import Domain, HTTPHeader
+from readthedocs.projects.models import AddonsConfig, Domain, HTTPHeader
from .base import BaseDocServing
@@ -168,6 +168,16 @@ def test_hosting_integrations_header(self):
self.assertIsNotNone(r.get("X-RTD-Hosting-Integrations"))
self.assertEqual(r["X-RTD-Hosting-Integrations"], "true")
+ def test_force_addons_header(self):
+ fixture.get(AddonsConfig, project=self.project, enabled=True)
+
+ r = self.client.get(
+ "/en/latest/", secure=True, headers={"host": "project.dev.readthedocs.io"}
+ )
+ self.assertEqual(r.status_code, 200)
+ self.assertIsNotNone(r.get("X-RTD-Force-Addons"))
+ self.assertEqual(r["X-RTD-Force-Addons"], "true")
+
@override_settings(ALLOW_PRIVATE_REPOS=False)
def test_cors_headers_external_version(self):
get(
diff --git a/readthedocs/proxito/tests/test_hosting.py b/readthedocs/proxito/tests/test_hosting.py
--- a/readthedocs/proxito/tests/test_hosting.py
+++ b/readthedocs/proxito/tests/test_hosting.py
@@ -10,9 +10,9 @@
from django.urls import reverse
from readthedocs.builds.constants import LATEST
-from readthedocs.builds.models import Build
-from readthedocs.projects.constants import PUBLIC
-from readthedocs.projects.models import Project
+from readthedocs.builds.models import Build, Version
+from readthedocs.projects.constants import PRIVATE, PUBLIC
+from readthedocs.projects.models import Feature, Project
@override_settings(
@@ -123,3 +123,306 @@ def test_get_config_unsupported_version(self):
)
assert r.status_code == 400
assert r.json() == self._get_response_dict("v2")
+
+ def test_disabled_addons_via_feature_flags(self):
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_ANALYTICS_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_EXTERNAL_VERSION_WARNING_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_NON_LATEST_VERSION_WARNING_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_DOC_DIFF_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_FLYOUT_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_SEARCH_DISABLED,
+ )
+ fixture.get(
+ Feature,
+ projects=[self.project],
+ feature_id=Feature.ADDONS_HOTKEYS_DISABLED,
+ )
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/en/latest/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+ assert r.json()["addons"]["analytics"]["enabled"] is False
+ assert r.json()["addons"]["external_version_warning"]["enabled"] is False
+ assert r.json()["addons"]["non_latest_version_warning"]["enabled"] is False
+ assert r.json()["addons"]["doc_diff"]["enabled"] is False
+ assert r.json()["addons"]["flyout"]["enabled"] is False
+ assert r.json()["addons"]["search"]["enabled"] is False
+ assert r.json()["addons"]["hotkeys"]["enabled"] is False
+
+ def test_non_latest_version_warning_versions(self):
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PRIVATE,
+ slug="private",
+ verbose_name="private",
+ built=True,
+ active=True,
+ )
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="public-built",
+ verbose_name="public-built",
+ built=True,
+ active=True,
+ )
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="public-not-built",
+ verbose_name="public-not-built",
+ built=False,
+ active=True,
+ )
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/en/latest/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ expected = ["latest", "public-built"]
+ assert r.json()["addons"]["non_latest_version_warning"]["versions"] == expected
+
+ def test_flyout_versions(self):
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PRIVATE,
+ slug="private",
+ verbose_name="private",
+ built=True,
+ active=True,
+ )
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="public-built",
+ verbose_name="public-built",
+ built=True,
+ active=True,
+ )
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="public-not-built",
+ verbose_name="public-not-built",
+ built=False,
+ active=True,
+ )
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="hidden",
+ verbose_name="hidden",
+ built=False,
+ hidden=True,
+ active=True,
+ )
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/en/latest/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ expected = [
+ {"slug": "latest", "url": "/en/latest/"},
+ {"slug": "public-built", "url": "/en/public-built/"},
+ ]
+ assert r.json()["addons"]["flyout"]["versions"] == expected
+
+ def test_flyout_translations(self):
+ fixture.get(
+ Project,
+ slug="translation",
+ main_language_project=self.project,
+ language="ja",
+ )
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/en/latest/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ expected = [
+ {"slug": "ja", "url": "/ja/"},
+ ]
+ assert r.json()["addons"]["flyout"]["translations"] == expected
+
+ def test_flyout_downloads(self):
+ fixture.get(
+ Version,
+ project=self.project,
+ privacy_level=PUBLIC,
+ slug="offline",
+ verbose_name="offline",
+ built=True,
+ has_pdf=True,
+ has_epub=True,
+ has_htmlzip=True,
+ active=True,
+ )
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/en/offline/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ expected = [
+ {
+ "name": "PDF",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/offline/pdf/",
+ },
+ {
+ "name": "HTML",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/offline/htmlzip/",
+ },
+ {
+ "name": "Epub",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/offline/epub/",
+ },
+ ]
+ assert r.json()["addons"]["flyout"]["downloads"] == expected
+
+ def test_flyout_single_version_project(self):
+ self.version.has_pdf = True
+ self.version.has_epub = True
+ self.version.has_htmlzip = True
+ self.version.save()
+
+ self.project.single_version = True
+ self.project.save()
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ expected = []
+ assert r.json()["addons"]["flyout"]["versions"] == expected
+
+ expected = [
+ {
+ "name": "PDF",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/latest/pdf/",
+ },
+ {
+ "name": "HTML",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/latest/htmlzip/",
+ },
+ {
+ "name": "Epub",
+ "url": "//project.dev.readthedocs.io/_/downloads/en/latest/epub/",
+ },
+ ]
+ assert r.json()["addons"]["flyout"]["downloads"] == expected
+
+ def test_project_subproject(self):
+ subproject = fixture.get(
+ Project, slug="subproject", repo="https://github.com/readthedocs/subproject"
+ )
+ self.project.add_subproject(subproject)
+
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/projects/subproject/en/latest/",
+ "client-version": "0.6.0",
+ "api-version": "0.1.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+
+ assert r.json()["projects"]["current"]["id"] == subproject.pk
+ assert r.json()["projects"]["current"]["slug"] == subproject.slug
+ assert (
+ r.json()["projects"]["current"]["repository"]["url"]
+ == "https://github.com/readthedocs/subproject"
+ )
| Addons: create a suite of tests for the Addons API endpoint
We need to create a good set of tests for this API endpoint so we are covered.
| Add tests for the HTTP headers and also for the enabled/disabled queryset: https://github.com/readthedocs/readthedocs.org/pull/10733 | 2023-09-19T15:45:57 |
readthedocs/readthedocs.org | 10,820 | readthedocs__readthedocs.org-10820 | [
"154"
] | 59299d102f38515e4a7f10d17f28857fd9c1a829 | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -17,6 +17,7 @@
ProjectSerializer,
VersionSerializer,
)
+from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.core.resolver import resolver
from readthedocs.core.unresolver import UnresolverError, unresolver
@@ -306,7 +307,9 @@ def _v0(self, project, version, build, filename, user):
# "http://test-builds-local.devthedocs.org/en/latest/index.html"
"base_url": resolver.resolve(
project=project,
- version_slug=project.get_default_version(),
+ # NOTE: we are using LATEST version to compare against to for now.
+ # Ideally, this should be configurable by the user.
+ version_slug=LATEST,
language=project.language,
filename=filename,
)
| no documentation/guidance on what to do with pdfs, epubs, offline
The docs for PDFs are here: http://read-the-docs.readthedocs.org/en/latest/features.html#pdf-generation
"When you build your project on RTD, we automatically build a PDF of your projects documentation. We also build them for every version that you upload, so we can host the PDFs of your latest documentation, as well as your latest stable releases as well."
and that's it!
Here's some information that would be useful:
1. What is the link for the PDF?
2. How do I make my docs link to these PDFs?
3. How about the links to ePubs, etc.?
Django is doing it, they have a little thing on the side: "Offline (Django 1.3): HTML | PDF | ePub "
How do other projects get all those little links in their sidebar? At least the PDF? The path seems to be "media.readthedocs.org/pdf/[projname]/[version]/[projname].pdf", but I don't see how I could get at the current "version" tag from within my templates.
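For illustration only, this is the kind of URL the quoted pattern produces; the pattern comes straight from the question above and the helper is hypothetical:
```python
# Based on the quoted pattern
# "media.readthedocs.org/pdf/[projname]/[version]/[projname].pdf".
def pdf_url(project_slug, version_slug):
    return f"https://media.readthedocs.org/pdf/{project_slug}/{version_slug}/{project_slug}.pdf"


print(pdf_url("pip", "latest"))
# -> https://media.readthedocs.org/pdf/pip/latest/pip.pdf
```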
| This is mainly a UI issue. I think the whole dashboard UI needs a refresh, and this stuff will be exposed then.
I replaced the individual links to each download for latest in the bar with a link to a Downloads page, which should list all active downloads for a project. This will work for now, though we need some fancier styling and integration for this.
| 2023-10-16T13:33:33 |
|
readthedocs/readthedocs.org | 10,823 | readthedocs__readthedocs.org-10823 | [
"10536"
] | 687f96e870f7de827c42478ae48b1f0ced454b0a | diff --git a/readthedocs/api/v2/permissions.py b/readthedocs/api/v2/permissions.py
--- a/readthedocs/api/v2/permissions.py
+++ b/readthedocs/api/v2/permissions.py
@@ -40,6 +40,17 @@ class IsAuthorizedToViewVersion(permissions.BasePermission):
def has_permission(self, request, view):
project = view._get_project()
version = view._get_version()
+
+ # I had to add this condition here because I want to return a 400 when
+ # the `project-slug` or `version-slug` are not sent to the API
+ # endpoint. In those cases, we don't have a Project/Version and this
+ # function was failing.
+ #
+ # I think it's a valid use case when Project/Version is invalid to be
+ # able to return a good response from the API view.
+ if project is None or version is None:
+ return True
+
has_access = (
Version.objects.public(
user=request.user,
diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -22,7 +22,7 @@
from readthedocs.core.resolver import resolver
from readthedocs.core.unresolver import UnresolverError, unresolver
from readthedocs.core.utils.extend import SettingsOverrideObject
-from readthedocs.projects.models import Feature
+from readthedocs.projects.models import Feature, Project
log = structlog.get_logger(__name__) # noqa
@@ -47,10 +47,19 @@ class BaseReadTheDocsConfigJson(CDNCacheTagsMixin, APIView):
Attributes:
- url (required): absolute URL from where the request is performed
- (e.g. ``window.location.href``)
-
api-version (required): API JSON structure version (e.g. ``0``, ``1``, ``2``).
+
+ project-slug (required): slug of the project.
+ Optional if "url" is sent.
+
+ version-slug (required): slug of the version.
+ Optional if "url" is sent.
+
+ url (optional): absolute URL from where the request is performed.
+ When sending "url" attribute, "project-slug" and "version-slug" are ignored.
+ (e.g. ``window.location.href``).
+
+ client-version (optional): JavaScript client version (e.g. ``0.6.0``).
"""
http_method_names = ["get"]
@@ -61,61 +70,73 @@ class BaseReadTheDocsConfigJson(CDNCacheTagsMixin, APIView):
@lru_cache(maxsize=1)
def _resolve_resources(self):
url = self.request.GET.get("url")
- if not url:
- # TODO: not sure what to return here when it fails on the `has_permission`
- return None, None, None, None
+ project_slug = self.request.GET.get("project-slug")
+ version_slug = self.request.GET.get("version-slug")
+
+ project = None
+ version = None
+ build = None
+ filename = None
unresolved_domain = self.request.unresolved_domain
# Main project from the domain.
project = unresolved_domain.project
- try:
- unresolved_url = unresolver.unresolve_url(url)
- # Project from the URL: if it's a subproject it will differ from
- # the main project got from the domain.
- project = unresolved_url.project
- version = unresolved_url.version
- filename = unresolved_url.filename
- # This query should use a particular index:
- # ``builds_build_version_id_state_date_success_12dfb214_idx``.
- # Otherwise, if the index is not used, the query gets too slow.
- build = version.builds.filter(
- success=True,
- state=BUILD_STATE_FINISHED,
- ).last()
-
- except UnresolverError as exc:
- # If an exception is raised and there is a ``project`` in the
- # exception, it's a partial match. This could be because of an
- # invalid URL path, but on a valid project domain. In this case, we
- # continue with the ``project``, but without a ``version``.
- # Otherwise, we return 404 NOT FOUND.
- project = getattr(exc, "project", None)
- if not project:
- raise Http404() from exc
-
- version = None
- filename = None
- build = None
+ if url:
+ try:
+ unresolved_url = unresolver.unresolve_url(url)
+ # Project from the URL: if it's a subproject it will differ from
+ # the main project got from the domain.
+ project = unresolved_url.project
+ version = unresolved_url.version
+ filename = unresolved_url.filename
+ # This query should use a particular index:
+ # ``builds_build_version_id_state_date_success_12dfb214_idx``.
+ # Otherwise, if the index is not used, the query gets too slow.
+ build = version.builds.filter(
+ success=True,
+ state=BUILD_STATE_FINISHED,
+ ).last()
+
+ except UnresolverError as exc:
+ # If an exception is raised and there is a ``project`` in the
+ # exception, it's a partial match. This could be because of an
+ # invalid URL path, but on a valid project domain. In this case, we
+ # continue with the ``project``, but without a ``version``.
+ # Otherwise, we return 404 NOT FOUND.
+ project = getattr(exc, "project", None)
+ if not project:
+ raise Http404() from exc
+
+ else:
+ project = Project.objects.filter(slug=project_slug).first()
+ version = Version.objects.filter(slug=version_slug, project=project).first()
+ if version:
+ build = version.builds.last()
return project, version, build, filename
def _get_project(self):
- project, version, build, filename = self._resolve_resources()
+ project, _, _, _ = self._resolve_resources()
return project
def _get_version(self):
- project, version, build, filename = self._resolve_resources()
+ _, version, _, _ = self._resolve_resources()
return version
def get(self, request, format=None):
url = request.GET.get("url")
+ project_slug = request.GET.get("project-slug")
+ version_slug = request.GET.get("version-slug")
if not url:
- return JsonResponse(
- {"error": "'url' GET attribute is required"},
- status=400,
- )
+ if not project_slug or not version_slug:
+ return JsonResponse(
+ {
+ "error": "'project-slug' and 'version-slug' GET attributes are required when not sending 'url'"
+ },
+ status=400,
+ )
addons_version = request.GET.get("api-version")
if not addons_version:
@@ -148,6 +169,7 @@ def get(self, request, format=None):
version,
build,
filename,
+ url,
user=request.user,
)
return JsonResponse(data, json_dumps_params={"indent": 4, "sort_keys": True})
@@ -199,6 +221,7 @@ def get(
version=None,
build=None,
filename=None,
+ url=None,
user=None,
):
"""
@@ -208,12 +231,12 @@ def get(
best JSON structure for that particular version.
"""
if addons_version.major == 0:
- return self._v0(project, version, build, filename, user)
+ return self._v0(project, version, build, filename, url, user)
if addons_version.major == 1:
- return self._v1(project, version, build, filename, user)
+ return self._v1(project, version, build, filename, url, user)
- def _v0(self, project, version, build, filename, user):
+ def _v0(self, project, version, build, filename, url, user):
"""
Initial JSON data structure consumed by the JavaScript client.
@@ -308,27 +331,6 @@ def _v0(self, project, version, build, filename, user):
versions_active_built_not_hidden.values_list("slug", flat=True)
),
},
- "doc_diff": {
- "enabled": Feature.ADDONS_DOC_DIFF_DISABLED not in project_features,
- # "http://test-builds-local.devthedocs.org/en/latest/index.html"
- "base_url": resolver.resolve(
- project=project,
- # NOTE: we are using LATEST version to compare against to for now.
- # Ideally, this should be configurable by the user.
- version_slug=LATEST,
- language=project.language,
- filename=filename,
- )
- if filename
- else None,
- "root_selector": "[role=main]",
- "inject_styles": True,
- # NOTE: `base_host` and `base_page` are not required, since
- # we are constructing the `base_url` in the backend instead
- # of the frontend, as the doc-diff extension does.
- "base_host": "",
- "base_page": "",
- },
"flyout": {
"enabled": Feature.ADDONS_FLYOUT_DISABLED not in project_features,
"translations": [
@@ -347,22 +349,22 @@ def _v0(self, project, version, build, filename, user):
"versions": [
{
# TODO: name this field "display_name"
- "slug": version.slug,
+ "slug": version_.slug,
"url": resolver.resolve(
project=project,
- version_slug=version.slug,
- external=version.type == EXTERNAL,
+ version_slug=version_.slug,
+ external=version_.type == EXTERNAL,
),
}
- for version in versions_active_built_not_hidden
+ for version_ in versions_active_built_not_hidden
],
"downloads": [
{
# TODO: name this field "display_name"
"name": name,
- "url": url,
+ "url": url_,
}
- for name, url in version_downloads
+ for name, url_ in version_downloads
],
# TODO: find a way to get this data in a reliably way.
# We don't have a simple way to map a URL to a file in the repository.
@@ -417,6 +419,38 @@ def _v0(self, project, version, build, filename, user):
},
}
+ # DocDiff depends on `url=` GET attribute.
+ # This attribute allows us to know the exact filename where the request was made.
+ # If we don't know the filename, we cannot return the data required by DocDiff to work.
+ # In that case, we just don't include the `doc_diff` object in the response.
+ if url:
+ data["addons"].update(
+ {
+ "doc_diff": {
+ "enabled": Feature.ADDONS_DOC_DIFF_DISABLED
+ not in project_features,
+ # "http://test-builds-local.devthedocs.org/en/latest/index.html"
+ "base_url": resolver.resolve(
+ project=project,
+ # NOTE: we are using LATEST version to compare against to for now.
+ # Ideally, this should be configurable by the user.
+ version_slug=LATEST,
+ language=project.language,
+ filename=filename,
+ )
+ if filename
+ else None,
+ "root_selector": "[role=main]",
+ "inject_styles": True,
+ # NOTE: `base_host` and `base_page` are not required, since
+ # we are constructing the `base_url` in the backend instead
+ # of the frontend, as the doc-diff extension does.
+ "base_host": "",
+ "base_page": "",
+ },
+ }
+ )
+
# Update this data with the one generated at build time by the doctool
if version and version.build_data:
data.update(version.build_data)
@@ -450,7 +484,7 @@ def _v0(self, project, version, build, filename, user):
return data
- def _v1(self, project, version, build, filename, user):
+ def _v1(self, project, version, build, filename, url, user):
return {
"api_version": "1",
"comment": "Undefined yet. Use v0 for now",
| diff --git a/readthedocs/proxito/tests/test_hosting.py b/readthedocs/proxito/tests/test_hosting.py
--- a/readthedocs/proxito/tests/test_hosting.py
+++ b/readthedocs/proxito/tests/test_hosting.py
@@ -498,3 +498,81 @@ def test_flyout_subproject_urls(self):
},
]
assert r.json()["addons"]["flyout"]["translations"] == expected_translations
+
+ def test_send_project_not_version_slugs(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "api-version": "0.1.0",
+ "client-version": "0.6.0",
+ "project-slug": self.project.slug,
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 400
+ assert r.json() == {
+ "error": "'project-slug' and 'version-slug' GET attributes are required when not sending 'url'"
+ }
+
+ def test_send_version_not_project_slugs(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "api-version": "0.1.0",
+ "client-version": "0.6.0",
+ "version-slug": self.version.slug,
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 400
+ assert r.json() == {
+ "error": "'project-slug' and 'version-slug' GET attributes are required when not sending 'url'"
+ }
+
+ def test_send_project_version_slugs(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "api-version": "0.1.0",
+ "client-version": "0.6.0",
+ "project-slug": self.project.slug,
+ "version-slug": self.version.slug,
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+ expected_response = self._get_response_dict("v0")
+ # Remove `addons.doc_diff` from the response because it's not present when `url=` is not sent
+ expected_response["addons"].pop("doc_diff")
+
+ assert self._normalize_datetime_fields(r.json()) == expected_response
+
+ def test_send_project_version_slugs_and_url(self):
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "api-version": "0.1.0",
+ "client-version": "0.6.0",
+ "url": "https://project.dev.readthedocs.io/en/latest/",
+ # When sending `url=`, slugs are ignored
+ "project-slug": "different-project-slug-than-url",
+ "version-slug": "different-version-slug-than-url",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+ assert self._normalize_datetime_fields(r.json()) == self._get_response_dict(
+ "v0"
+ )
| Addons: make the API endpoint as cacheable as possible
Once we are fully migrated to the new Addons, we will be hitting the `/_/addons/` endpoint _a lot_. This endpoint will be hit by every single request on a documentation page view. We would like to make this endpoint as fast as possible, but also try to cache it to avoid hitting our servers. It can be compared with the current Footer API.
The `/_/addons/` endpoint needs to be cached by:
- `url` query string parameter
- `X-RTD-Hosting-Integrations-Version` HTTP header
- `X-RTD-Hosting-Integrations-API-Version` HTTP header (once https://github.com/readthedocs/readthedocs.org/pull/10276 gets implemented)
Note the endpoint is behind authentication, so that's not an issue here.
To build the response, the endpoint depends on:
- Project instance
- Version instance
- Build instance
We need to use the "backend resolver" to build the `base_url` required by the DocDiff addons. That URL depends on the exact filename of the page, so we need cache by `url` parameter. We could talk about different ideas here to be able to not depend on it if we want to. Some ideas the mentioned:
- create a new endpoint to use the backend resolver, and call it for docdiff
- copy / re-implement the "backend resolver" logic in the frontend
- make the current endpoint "expandable" and only return docdiff data when users enable the docdiff in their docs
- add a new query string parameter `docdiff=[true|false]` that's only `true` on pageviews from `*.readthedocs.build`
The last item seems to be the easiest one and will give us most of the benefits, since ~95% of our traffic will call the endpoint with `docdiff=false` 😄
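A rough sketch of how that last idea could look on the server side; `build_base_response` is a hypothetical helper, and the real view is more involved:
```python
# Hypothetical view fragment: the base response can be cached per project/version;
# only pull request preview pages ask for the page-dependent DocDiff data.
docdiff = request.GET.get("docdiff") == "true"
data = build_base_response(project, version)  # hypothetical helper

if docdiff:
    url = request.GET.get("url")
    filename = unresolver.unresolve_url(url).filename
    data["addons"]["doc_diff"] = {
        "base_url": resolver.resolve(
            project=project,
            version_slug="latest",
            language=project.language,
            filename=filename,
        ),
    }
```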
| > `X-RTD-Hosting-Integrations-Version` HTTP header
> `X-RTD-Hosting-Integrations-API-Version` HTTP header
We also talked about moving these to query string parameters because it's easier to cache them than HTTP headers: https://developers.cloudflare.com/cache/how-to/cache-keys/#create-custom-cache-keys
I took a look at the Algolia integration we have setup on https://rtd-with-docsearch.readthedocs.io/en/latest/ and it seems they send all the versions in the query string. Something to consider 😉

> We also talked about moving these to query string parameters because it's easier to cache them than HTTP headers
I implemented this part in #10753 and https://github.com/readthedocs/addons/pull/138
Currently, we are using `unresolver.unresolve_url(url)` to get the `Project` and `Version` objects. I don't think we can easily get rid of the `url=` parameter since we need to, somehow, communicate to the endpoint the project and version the user is reading.
However, it would be interesting to figure out how to do this because, in that case, it would allow us to cache the request only "per project & version & build [^1]" instead of "per project & version & build **&** _exact URL_" as we are currently doing. There are some ideas here:
1. use the `Referer` HTTP header injected automatically by the browser
2. inject some meta HTML tags in the CF worker: `<meta name="project_slug" content="test-builds">` and `<meta name="version_slug" content="latest">`. Then our JS client could send them in the request `/_/addons/?project_slug=test-builds&version_slug="latest"`
I think 1) is probably the easiest and simplest option since 2) seems pretty hacky 😅 . Following that pattern, I think this could be as follows:
- requests made from **regular** documentation pages:
- get the `Referer` HTTP header and call `unresolver.unresolve_url(referer)`
- cache these requests normally (using the `project:version` CDN tags we are already using)
- do not return any data that depends on the _exact URL_
- requests made on documentation pages that **depend on their exact URL** (e.g. pull request previews)
- send the `url=` attribute with the `window.location.href` value
- use the `url=` attribute to call the unresolver
- populate response data that depends on the exact URL (e.g. `doc_diff.base_url`)
- cache these requests normally. They won't interfere with the previous cached requests because they have an extra `url=` attribute. They also won't interfere between themselves because they all have different `url=` values and CF will treat them differently 💪🏼
This sounds pretty simple to understand and follow in my head, but I've been thinking on this pattern for some time already. I'd like to get some feedback on this and decide how we want to move forward.
Note that this will have an impact on our web instances and also on our database. Besides, it will make our addons rendering a lot faster since most of the requests to the endpoint will be cached and returned directly from CF. Currently, the response times for this endpoint over the last 7 days are:
* AVG: 129 ms
* Median: 76.5 ms
* 95th percentile: 160 ms
[^1]: the build is automatically fetched (no need to pass it to the endpoint) by querying the latest successful build for that particular version.
> get the `Referer` HTTP header and call `unresolver.unresolve_url(referer)`
Hrm, thinking a little more about this, isn't this the same as passing the `url=` attribute? I mean, instead of varying the cache on the `url=`, we will need to vary it on the `Referer` header, which will have exactly the same behavior, right?
Maybe option 2), even being hacky 🕵️, is the only option that works how we want 😄
Oh, wait! We are already returning `X-RTD-project` and `X-RTD-Version` headers to the browser 🚀 . This solves this problem in a much better way 💯
```
$ curl -sIL https://docs.readthedocs.io/en/stable/ | grep x-rtd-
x-rtd-domain: docs.readthedocs.io
x-rtd-path: /proxito/html/docs/stable/index.html
x-rtd-project: docs
x-rtd-project-method: public_domain
x-rtd-version: stable
x-rtd-version-method: path
```
With that data we can call our addons API as follows: `/_/addons/?project__slug=<X-RTD-Project>&version__slug=<X-RTD-Version>`
One thing that I'm not sure how to do is how to get _those headers_ from a request that was done _before_ our `readthedocs-addons.js` file was loaded. If there is no way to do that, we would need to do an extra `HEAD` request to get them:
```javascript
fetch(window.location.href, { method: "HEAD" })
  .then((response) => {
    const projectSlug = response.headers.get("x-rtd-project");
    const versionSlug = response.headers.get("x-rtd-version");
    fetch(
      "/_/addons/?" +
        new URLSearchParams({ project__slug: projectSlug, version__slug: versionSlug })
    ).then((response) => {
      // ...
    });
  });
```
Even doing an extra `HEAD` request, it will be much better. We can research whether that's possible and manage it as an improvement.
With this approach, we will have two different requests:
1. **Including** the `url=` parameter: this request will be made if there is any addon enabled on the page that depends on the URL itself. The DocDiff addon is a good example of this. Then, on pages that have DocDiff enabled (pull request previews), our JS will hit `/_/addons/?project__slug=...&version_slug=...` + **`&url=...`**
2. **Omitting** the `url=` parameter: if there aren't any addons enabled on the current page that _require_ sending the exact URL to the API, the request won't include it: `/_/addons/?project__slug=...&version__slug=...`
With this, our CDN will handle the cache correctly since it will treat the URLs containing only the `project__slug` and `version__slug` as _the same_, no matter what page of the documentation the user is reading. Also, it will treat all the URLs containing `url=` as different for each page of the documentation.
Now, with this, we will need to know _before performing the request_ which addons "could be enabled" on the current page to decide whether or not to include the `url=`. We will need to ask each addon if it could be enabled and, in that case, if it requires sending the `url=` parameter to the backend. Then, **if none of the addons that could be enabled on the page needs to send the `url=` parameter, we don't send it** 👍🏼
I imagine something like:
```javascript
let addons = [ ... ];
let params = {"client-version": "...", "api-version": "..."};
let sendUrlParam = false;
for (const addon of addons) {
if (addon.requiresURL() && addon.couldBeEnabledOnPage()) {
sendUrlParam = true;
break
}
}
if (sendUrlParam) {
params.url = window.location.href;
}
...
fetch(url, ....);
```
Then, an addon like DocDiff would implement:
```javascript
export class DocDiffAddon extends AddonBase {
static requiresUrl() {
return true;
}
static couldBeEnabledOnPage() {
return window.location.host.endsWith(".readthedocs.build");
}
}
```
Links to addons code:
* https://github.com/readthedocs/readthedocs-client/blob/ef72cba9a6bb22d125bce73c8a17bbcb15fd7b3d/src/readthedocs-config.js#L7-L30
* https://github.com/readthedocs/readthedocs-client/blob/ef72cba9a6bb22d125bce73c8a17bbcb15fd7b3d/src/index.js#L11-L57
@humitos I don't love the idea of doing an extra HEAD request, because that will increase latency on the rendering of the addons (2 complete HTTP round trips after page load). If we're already injecting data into the page to add the addons JS, then injecting the value of the project & version slug doesn't seem like additional hackiness, and if it saves us that round trip, likely worth doing.
I don't believe there's a way to capture the headers from the request in our JS, which would be a really nice solution to the problem.
> 1. Including url= parameter: will be made if there is any addons enable on the page that depends on the URL itself. DocDiff addon is a good example of this. Then, on pages that have DocDiff enabled (pull request previews), our JS will hit /_/addons/?project__slug=...&version_slug=... + &url=...
> 2. Omitting the url= parameter: if there aren't any addons enabled on the current page that requires sending the exact URL to the API, the request won't include it: /_/addons/?project__slug=...&version__slug=...
Is this the plan we're going with, assuming that we get the project & version slug *somehow*..., either via a HEAD request, or injected into the page? This logic looks 💯 to me. I think the above logic is great, and then we can try injecting the project/version values in the CF worker, and if that doesn't work, always fall back to a HEAD request.
(Or some other idea we haven't come up with yet 🤔... I think perhaps we could also just parse the URL in JS and only send the hostname and matching version path to the API, but that's harder with the custom URLconf work we've been doing, which would be additional data coming from the server that we need to have in the client, which is the primary problem we're trying to solve here..)
Thanks @ericholscher for your input here.
Summarizing, this is the current plan that I will implement then:
* inject project/version slug on the HTML using `<meta>` tags via CF worker with the same script we are using to inject the addons js
* grab project/version slug via our addons js and send them to the API
* implement the `Addon.couldBeEnabledOnPage` js function on each addon to decide whether or not send `url=` attribute
* send `url=` attribute if there is at least one addon that has to be enabled on the page
Let me know if you agree with the current plan.
> I think perhaps we could also just parse the URL in JS and only send the hostname and matching version path to the API, but that's harder with the custom URLconf work we've been doing
I thought about this already and I didn't see it clearly, since we would have to re-implement the Python resolver logic in JavaScript and always keep it updated with all the changes/features we introduce to it. I thought that would be fragile and we would end up with bugs in both, making them behave differently and hard to debug in the end 🤷🏼
@humitos that plan sounds 💯.
> inject project/version slug on the HTML using `<meta>` tags via CF worker with the same script we are using to inject the addons js
This is done and deployed already. See it in action at https://test-builds.readthedocs.io/en/latest/ | 2023-10-16T16:41:03 |
readthedocs/readthedocs.org | 10,840 | readthedocs__readthedocs.org-10840 | [
"10831",
"10830"
] | 97166370b87fcb576108c74b7a0a3431bb44ce61 | diff --git a/readthedocs/builds/migrations/0054_add_builds_index_for_addons.py b/readthedocs/builds/migrations/0054_add_builds_index_for_addons.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/builds/migrations/0054_add_builds_index_for_addons.py
@@ -0,0 +1,20 @@
+# Generated by Django 4.2.6 on 2023-10-18 11:43
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("builds", "0053_alter_version_build_data"),
+ ]
+
+ operations = [
+ migrations.AlterIndexTogether(
+ name="build",
+ index_together={
+ ("date", "id"),
+ ("version", "state", "type"),
+ ("version", "state", "date", "success"),
+ },
+ ),
+ ]
diff --git a/readthedocs/builds/models.py b/readthedocs/builds/models.py
--- a/readthedocs/builds/models.py
+++ b/readthedocs/builds/models.py
@@ -847,8 +847,11 @@ class Meta:
ordering = ['-date']
get_latest_by = 'date'
index_together = [
- ['version', 'state', 'type'],
- ['date', 'id'],
+ # Useful for `/_/addons/` API endpoint.
+ # Query: ``version.builds.filter(success=True, state=BUILD_STATE_FINISHED)``
+ ["version", "state", "date", "success"],
+ ["version", "state", "type"],
+ ["date", "id"],
]
indexes = [
models.Index(fields=['project', 'date']),
diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -17,7 +17,7 @@
ProjectSerializer,
VersionSerializer,
)
-from readthedocs.builds.constants import EXTERNAL, LATEST
+from readthedocs.builds.constants import BUILD_STATE_FINISHED, EXTERNAL, LATEST
from readthedocs.builds.models import Version
from readthedocs.core.resolver import resolver
from readthedocs.core.unresolver import UnresolverError, unresolver
@@ -77,7 +77,13 @@ def _resolve_resources(self):
project = unresolved_url.project
version = unresolved_url.version
filename = unresolved_url.filename
- build = version.builds.last()
+ # This query should use a particular index:
+ # ``builds_build_version_id_state_date_success_12dfb214_idx``.
+ # Otherwise, if the index is not used, the query gets too slow.
+ build = version.builds.filter(
+ success=True,
+ state=BUILD_STATE_FINISHED,
+ ).last()
except UnresolverError as exc:
# If an exception is raised and there is a ``project`` in the
| Remove addons response for last build
We noticed a big spike in time spent in build table queries today and
traced the query back to the addons response. The addons data structure
includes a list of the latest builds for each version.
I've disabled that for now, but it's not yet clear why this became an
issue on Fri, or why the cache doesn't seem to be helping much here.
cc @humitos in case you have some more input here
Addons: review and reenable build listing
See https://github.com/readthedocs/readthedocs.org/pull/10831 for some background on this issue.
We found a large increase in build select query time and traced it back to the usage of `project.version.builds.last()`. This is a rather expensive call, so it was disabled. We will need to return to this to figure out if we need it and how to give this data in a performant way if we do.
A few leftovers from my hotfix:
- [ ] Reenable external version warning notification addon in admin
- [ ] Release an addons library version with defensive measures around the missing data
- [ ] Revert some of the hotfix?
| Noted in chat, but I don't exactly know the best way to test this change outside of relying on our test suite. I might opt to apply this early or it will take a while to get this out.
Actually, this is not quite the fix :disappointed:
In the addons library we have this part of the data structure hardcoded in our template:
https://github.com/readthedocs/addons/blob/main/src/notification.js#L54
That means that we can't return `None` here, as the addon library's HTML template string interpolation will throw an exception when it can't find the `config.builds.current.id` attribute.
I'm going to proceed with this hotfix to make sure the database stops complaining. Exceptions from the addon library will be limited to projects using PR builds, as this is the only place this URL is shown. It might be worth disabling the notification addon until we add defensive measures to the configuration access and template rendering to it.
I will release this hotfix just to community for now. Currently, the addons data average response time is still over a few seconds long:

Also note: I disabled the external version warning via the feature flag. We'll need to re-enable that later.
And after hotfix:

Seems to have stabilized well, and I haven't noticed any console exceptions yet.
I'm not sure what the best solution is here. Ideally, we should have the `Build` object in the response. We should think about how to make this query reliable and fast. I don't really understand why it's slow, actually, since we have a DB index on `Build.date` and getting the last one should be pretty fast.
```
docs=> \d builds_build
...
Indexes:
"builds_build_pkey" PRIMARY KEY, btree (id)
"Build_State" btree (state)
"builds_buil_project_fea68c_idx" btree (project_id, date)
"builds_build_date_id_9f6984cf_idx" btree (date, id)
"builds_build_project_id" btree (project_id)
"builds_build_version_id_cd1a89966ebc2f3" btree (version_id, state, type)
"date_index" btree (date)
...
docs=>
```
0.002376s seems to be _pretty_ fast.
```
In [2]: p.builds.last()
SELECT "builds_build"."id",
"builds_build"."project_id",
"builds_build"."version_id",
"builds_build"."type",
"builds_build"."state",
"builds_build"."status",
"builds_build"."date",
"builds_build"."success",
"builds_build"."setup",
"builds_build"."setup_error",
"builds_build"."output",
"builds_build"."error",
"builds_build"."exit_code",
"builds_build"."commit",
"builds_build"."version_slug",
"builds_build"."version_name",
"builds_build"."version_type",
"builds_build"."_config",
"builds_build"."readthedocs_yaml_path",
"builds_build"."length",
"builds_build"."builder",
"builds_build"."cold_storage",
"builds_build"."task_id"
FROM "builds_build"
WHERE "builds_build"."project_id" = 822831
ORDER BY "builds_build"."date" ASC
LIMIT 1
Execution time: 0.002376s [Database: default]
Out[2]: SELECT "auth_user"."username"
FROM "auth_user"
INNER JOIN "projects_project_users"
ON ("auth_user"."id" = "projects_project_users"."user_id")
WHERE "projects_project_users"."project_id" = 822831
Execution time: 0.001484s [Database: default]
<Build: Build langchain for hwchase17 dev2049 WFH eugene-lc efriislangchain (18416334)>
In [3]:
```
I ran this query with `EXPLAIN ANALYZE` and it seems it uses the index `builds_buil_project_fea68c_idx` which is an index for `project_id, date` together.
The query is really fast, `Execution Time: 0.061 ms`. I think the problem should be something different, then. Is it possible for this query to get slow when a new `Build` object is created but the index is not yet updated? 🤷🏼
```
docs=> EXPLAIN ANALYZE SELECT "builds_build"."id",
"builds_build"."project_id",
"builds_build"."version_id",
"builds_build"."type",
"builds_build"."state",
"builds_build"."status",
"builds_build"."date",
"builds_build"."success",
"builds_build"."setup",
"builds_build"."setup_error",
"builds_build"."output",
"builds_build"."error",
"builds_build"."exit_code",
"builds_build"."commit",
"builds_build"."version_slug",
"builds_build"."version_name",
"builds_build"."version_type",
"builds_build"."_config",
"builds_build"."readthedocs_yaml_path",
"builds_build"."length",
"builds_build"."builder",
"builds_build"."cold_storage",
"builds_build"."task_id"
FROM "builds_build"
WHERE "builds_build"."project_id" = 822831
ORDER BY "builds_build"."date" ASC
LIMIT 1;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (cost=0.56..4.12 rows=1 width=322) (actual time=0.022..0.023 rows=1 loops=1)
-> Index Scan using builds_buil_project_fea68c_idx on builds_build (cost=0.56..1927.76 rows=542 width=322) (actual time=0.022..0.022 rows=1 loops=1)
Index Cond: (project_id = 822831)
Planning Time: 0.586 ms
Execution Time: 0.061 ms
(5 rows)
docs=>
```
I realized that in my previous queries I was taking the latest build of the project, but not for a particular version. Here is the updated query:
```
docs=> EXPLAIN ANALYZE SELECT "builds_build"."id",
"builds_build"."project_id",
"builds_build"."version_id",
"builds_build"."type",
"builds_build"."state",
"builds_build"."status",
"builds_build"."date",
"builds_build"."success",
"builds_build"."setup",
"builds_build"."setup_error",
"builds_build"."output",
"builds_build"."error",
"builds_build"."exit_code",
"builds_build"."commit",
"builds_build"."version_slug",
"builds_build"."version_name",
"builds_build"."version_type",
"builds_build"."_config",
"builds_build"."readthedocs_yaml_path",
"builds_build"."length",
"builds_build"."builder",
"builds_build"."cold_storage",
"builds_build"."task_id"
FROM "builds_build"
WHERE "builds_build"."version_id" = 4740677
ORDER BY "builds_build"."date" ASC
LIMIT 1;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (cost=1053.54..1053.54 rows=1 width=322) (actual time=0.046..0.046 rows=1 loops=1)
-> Sort (cost=1053.54..1054.33 rows=318 width=322) (actual time=0.045..0.046 rows=1 loops=1)
Sort Key: date
Sort Method: quicksort Memory: 26kB
-> Index Scan using builds_build_version_id_cd1a89966ebc2f3 on builds_build (cost=0.56..1051.95 rows=318 width=322) (actual time=0.017..0.020 rows=2 loops=1)
Index Cond: (version_id = 4740677)
Planning Time: 0.087 ms
Execution Time: 0.075 ms
(8 rows)
```
It uses the index `builds_build_version_id_cd1a89966ebc2f3`, which is based on `(version_id, state, type)` but not on `date`.
I'd propose creating an index for this based on `(version_id, state, date, success)` (following https://github.com/readthedocs/readthedocs.org/pull/10831#discussion_r1363699484); this will make these queries pretty fast, IMO.
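A minimal sketch of how that proposed index could look, expressed as Django `Meta.indexes` (the real change would be a migration on the existing `Build` model; the sketched fields and lengths here are only illustrative):
```python
from django.db import models


class Build(models.Model):
    # Only the fields relevant to the proposed index are sketched here.
    version = models.ForeignKey(
        "builds.Version", null=True, on_delete=models.SET_NULL
    )
    state = models.CharField(max_length=55)
    date = models.DateTimeField(auto_now_add=True)
    success = models.BooleanField(default=True)

    class Meta:
        app_label = "builds"
        indexes = [
            # Maps to the (version_id, state, date, success) columns proposed above,
            # so `ORDER BY date` filtered by version/state/success can use one index.
            models.Index(fields=["version", "state", "date", "success"]),
        ]
```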
| 2023-10-18T11:41:48 |
|
readthedocs/readthedocs.org | 10,947 | readthedocs__readthedocs.org-10947 | [
"10934"
] | ab2f8a013593c8a1eed68e96dc90a3122ee6184c | diff --git a/readthedocs/organizations/forms.py b/readthedocs/organizations/forms.py
--- a/readthedocs/organizations/forms.py
+++ b/readthedocs/organizations/forms.py
@@ -208,6 +208,7 @@ def __init__(self, *args, **kwargs):
self.fields["projects"] = forms.ModelMultipleChoiceField(
queryset=self.organization.projects,
widget=forms.CheckboxSelectMultiple,
+ required=False,
)
| Teams: project form doesn't allow for null/empty project list
I found trying to remove a project from a team that it was impossible to remove the project if it was the last project attached to the team. The form expects some non-null value and throws a validation error if the list is empty.
To reproduce:
- Add a team
- Add a project to the team
- Try to remove the project from the team
- You'll get a validation error on the form
Instead, this should be a valid form submission and the team should have 0 projects attached.
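A minimal sketch of why the one-line fix above works: with `required=False`, a `ModelMultipleChoiceField` accepts an empty selection, so submitting the form with no projects checked validates. The form class and queryset below are illustrative, not the actual dashboard form:
```python
from django import forms

from readthedocs.projects.models import Project


class TeamProjectsForm(forms.Form):
    projects = forms.ModelMultipleChoiceField(
        queryset=Project.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        required=False,  # an empty list is now a valid submission
    )


# form = TeamProjectsForm(data={"projects": []})
# form.is_valid()  # True: the team ends up with 0 projects attached
```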
| 2023-12-07T23:04:48 |
||
readthedocs/readthedocs.org | 10,979 | readthedocs__readthedocs.org-10979 | [
"9008"
] | ac125ee9cc733b264e3dec5d93e97b0fa22cc2bc | diff --git a/readthedocs/config/config.py b/readthedocs/config/config.py
--- a/readthedocs/config/config.py
+++ b/readthedocs/config/config.py
@@ -226,9 +226,11 @@ def settings(self):
def validate(self):
"""Validates and process ``raw_config``."""
self._config['formats'] = self.validate_formats()
+
+ # This should be called before ``validate_python`` and ``validate_conda``
+ self._config["build"] = self.validate_build()
+
self._config['conda'] = self.validate_conda()
- # This should be called before validate_python
- self._config['build'] = self.validate_build()
self._config['python'] = self.validate_python()
# Call this before validate sphinx and mkdocs
self.validate_doc_types()
@@ -258,6 +260,11 @@ def validate_conda(self):
"""Validates the conda key."""
raw_conda = self._raw_config.get('conda')
if raw_conda is None:
+ if self.is_using_conda:
+ raise ConfigError(
+ message_id=ConfigError.CONDA_KEY_REQUIRED,
+ format_values={"key": "conda"},
+ )
return None
with self.catch_validation_error('conda'):
diff --git a/readthedocs/config/exceptions.py b/readthedocs/config/exceptions.py
--- a/readthedocs/config/exceptions.py
+++ b/readthedocs/config/exceptions.py
@@ -22,6 +22,7 @@ class ConfigError(BuildUserError):
SUBMODULES_INCLUDE_EXCLUDE_TOGETHER = "config:submodules:include-exclude-together"
INVALID_KEY_NAME = "config:base:invalid-key-name"
SYNTAX_INVALID = "config:base:invalid-syntax"
+ CONDA_KEY_REQUIRED = "config:conda:required"
# TODO: improve these error messages shown to the user
diff --git a/readthedocs/config/notifications.py b/readthedocs/config/notifications.py
--- a/readthedocs/config/notifications.py
+++ b/readthedocs/config/notifications.py
@@ -237,6 +237,18 @@
),
type=ERROR,
),
+ Message(
+ id=ConfigError.CONDA_KEY_REQUIRED,
+ header=_("Missing required key"),
+ body=_(
+ textwrap.dedent(
+ """
+ The key <code>conda.environment</code> is required when using Conda or Mamba.
+ """
+ ).strip(),
+ ),
+ type=ERROR,
+ ),
]
registry.add(messages)
| diff --git a/readthedocs/config/tests/test_config.py b/readthedocs/config/tests/test_config.py
--- a/readthedocs/config/tests/test_config.py
+++ b/readthedocs/config/tests/test_config.py
@@ -282,6 +282,23 @@ def test_conda_check_valid(self, tmpdir):
build.validate()
assert build.conda.environment == "environment.yml"
+ def test_conda_key_required_for_conda_mamba(self):
+ build = get_build_config(
+ {
+ "build": {
+ "os": "ubuntu-22.04",
+ "tools": {
+ "python": "miniconda3-4.7",
+ },
+ },
+ }
+ )
+ print(build)
+ with raises(ConfigError) as excinfo:
+ build.validate()
+ assert excinfo.value.message_id == ConfigError.CONDA_KEY_REQUIRED
+ assert excinfo.value.format_values.get("key") == "conda"
+
@pytest.mark.parametrize("value", [3, [], "invalid"])
def test_conda_check_invalid_value(self, value):
build = get_build_config({"conda": value})
| Better error when using conda without an environment file
In this build https://readthedocs.org/projects/test-builds/builds/16326429/ I'm using conda, but I didn't define the environment file in the config file, so I got a generic error
The error came from https://github.com/readthedocs/readthedocs.org/blob/6752de13bd7f8ebcf393e66c4fdb738e17171046/readthedocs/doc_builder/python_environments.py#L550-L557
We should probably require using conda.environment when using build.tools.python = conda
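A minimal sketch of the behaviour the patch above introduces, reusing the `get_build_config` helper from the test suite (a test utility used in `readthedocs/config/tests/test_config.py`, not a public API; its import is omitted here):
```python
from readthedocs.config.exceptions import ConfigError

config = get_build_config(
    {
        "build": {
            "os": "ubuntu-22.04",
            "tools": {"python": "miniconda3-4.7"},  # a conda/mamba tool is selected
        },
        # note: no top-level "conda" key defined
    }
)
try:
    config.validate()
except ConfigError as exc:
    # validate_conda() now fails early with a clear, user-facing message
    assert exc.message_id == ConfigError.CONDA_KEY_REQUIRED
```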
| > We should probably require using conda.environment when using build.tools.python = conda
This sounds like the right approach to me as well! 💯
The reverse case should probably also be considered: defining `conda.environment` but using `build.tools.python: "3.8"`. See https://github.com/readthedocs/readthedocs.org/issues/10047#issuecomment-1438326133
When working on this issue, we should also take a look at https://github.com/readthedocs/readthedocs.org/issues/10502 which is strongly related.
Something similar happens when the Git submodule does not exist: https://github.com/readthedocs/readthedocs.org/pull/10594/files#r1282428289 -- The user gets a weird error.
I'm noting this here so we don't forget to improve this error when working on this.
This is another issue related to better error messaging on the config file https://github.com/readthedocs/readthedocs.org/issues/8595 | 2024-01-02T15:20:40 |
readthedocs/readthedocs.org | 11,050 | readthedocs__readthedocs.org-11050 | [
"10981"
] | 85fb6a12a99b5ee9cbc49cdf5e4027c3e252ba83 | diff --git a/readthedocs/notifications/migrations/0003_notification_indexes.py b/readthedocs/notifications/migrations/0003_notification_indexes.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/notifications/migrations/0003_notification_indexes.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.9 on 2024-01-23 10:50
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("notifications", "0002_notification_format_values"),
+ ]
+
+ operations = [
+ migrations.AlterModelOptions(
+ name="notification",
+ options={},
+ ),
+ migrations.AddIndex(
+ model_name="notification",
+ index=models.Index(
+ fields=["attached_to_content_type", "attached_to_id"],
+ name="notificatio_attache_c6aa1d_idx",
+ ),
+ ),
+ ]
diff --git a/readthedocs/notifications/models.py b/readthedocs/notifications/models.py
--- a/readthedocs/notifications/models.py
+++ b/readthedocs/notifications/models.py
@@ -58,6 +58,11 @@ class Notification(TimeStampedModel):
# notifications attached to the same object.
objects = NotificationQuerySet.as_manager()
+ class Meta:
+ indexes = [
+ models.Index(fields=["attached_to_content_type", "attached_to_id"]),
+ ]
+
def __str__(self):
return self.message_id
| Notifications: cancel notifications when status changes
Some notifications depend on a specific status (e.g. an unpaid subscription). We need to find out which those are and create a periodic Celery task, together with Django signals, in charge of cancelling those notifications once the status has changed (e.g. the user has paid the subscription).
Note that this may also apply to changes made via Django forms.
Another good example is for custom domain pending validation. I'm sure there are more, but I don't have all of them in mind currently.
This is a new feature, so there may also be good _new notifications_ where this pattern could be applied.
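A hypothetical sketch of the signal half of this pattern, assuming a `Domain` save can tell us validation finished (the `is_valid` check and the exact filter are assumptions, not existing APIs; a real handler would also filter by `attached_to_content_type`):
```python
from django.db.models.signals import post_save
from django.dispatch import receiver

from readthedocs.notifications.models import Notification
from readthedocs.projects.models import Domain


@receiver(post_save, sender=Domain)
def cancel_domain_validation_notifications(sender, instance, **kwargs):
    # Assumption: the domain has finished validating successfully.
    if instance.is_valid:
        Notification.objects.filter(
            attached_to_id=instance.pk,
            message_id="project:domain:validation-pending",
        ).delete()
```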
| Current notifications affected by this:
* `core:email:validation-pending`
* `project:domain:validation-pending`
* `project:domain:validation-expired`
* `project:invalid:skip-builds`
* `organization:disabled` | 2024-01-23T10:53:38 |
|
readthedocs/readthedocs.org | 11,056 | readthedocs__readthedocs.org-11056 | [
"10773"
] | b212a4d9c6b5c60eb74dca553663597035bfb82b | diff --git a/readthedocs/projects/migrations/0112_disable_analytics_addons.py b/readthedocs/projects/migrations/0112_disable_analytics_addons.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/projects/migrations/0112_disable_analytics_addons.py
@@ -0,0 +1,17 @@
+# Generated by Django 4.2.9 on 2024-01-23 16:54
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("projects", "0111_add_multiple_versions_without_translations"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="addonsconfig",
+ name="analytics_enabled",
+ field=models.BooleanField(default=False),
+ ),
+ ]
diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -158,7 +158,10 @@ class AddonsConfig(TimeStampedModel):
)
# Analytics
- analytics_enabled = models.BooleanField(default=True)
+
+ # NOTE: we keep analytics disabled by default to save resources.
+ # Most projects won't be taking a look at these numbers.
+ analytics_enabled = models.BooleanField(default=False)
# Docdiff
doc_diff_enabled = models.BooleanField(default=True)
| diff --git a/readthedocs/proxito/tests/responses/v0.json b/readthedocs/proxito/tests/responses/v0.json
--- a/readthedocs/proxito/tests/responses/v0.json
+++ b/readthedocs/proxito/tests/responses/v0.json
@@ -79,7 +79,7 @@
},
"addons": {
"analytics": {
- "enabled": true,
+ "enabled": false,
"code": null
},
"external_version_warning": {
| Analytics: make this feature opt-in by default
We want to make the analytics feature opt-in by default because it hits the DB on every page view and there are a bunch of projects that are not interested in this data. They can always go to the project's settings and enable it if they want.
* Reference: https://github.com/readthedocs/meta/discussions/130#discussioncomment-7043655
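For reference, a minimal sketch of opting a project back in after the default flips to disabled (field and model names from the patch above; the lookup itself is illustrative):
```python
from readthedocs.projects.models import AddonsConfig

addons = AddonsConfig.objects.get(project__slug="my-project")
addons.analytics_enabled = True  # re-enable page-view analytics for this project
addons.save()
```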
| 2024-01-23T16:57:16 |
|
readthedocs/readthedocs.org | 11,073 | readthedocs__readthedocs.org-11073 | [
"10701"
] | dcd46559a9d9a5a654edede3a1edc91e8a1a30f0 | diff --git a/readthedocs/doc_builder/backends/sphinx.py b/readthedocs/doc_builder/backends/sphinx.py
--- a/readthedocs/doc_builder/backends/sphinx.py
+++ b/readthedocs/doc_builder/backends/sphinx.py
@@ -295,7 +295,6 @@ def build(self):
build_command = [
*self.get_sphinx_cmd(),
"-T",
- "-E",
]
if self.config.sphinx.fail_on_warning:
build_command.extend(["-W", "--keep-going"])
@@ -483,7 +482,6 @@ def build(self):
self.run(
*self.get_sphinx_cmd(),
"-T",
- "-E",
"-b",
self.sphinx_builder,
"-d",
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -191,7 +191,6 @@ def test_build_sphinx_formats(self, load_yaml_config, formats, builders):
"-m",
"sphinx",
"-T",
- "-E",
"-b",
"html",
"-d",
@@ -212,7 +211,6 @@ def test_build_sphinx_formats(self, load_yaml_config, formats, builders):
"-m",
"sphinx",
"-T",
- "-E",
"-b",
builder,
"-d",
@@ -829,7 +827,6 @@ def test_build_commands_executed(
"-m",
"sphinx",
"-T",
- "-E",
"-b",
"html",
"-d",
@@ -846,7 +843,6 @@ def test_build_commands_executed(
"-m",
"sphinx",
"-T",
- "-E",
"-b",
"readthedocssinglehtmllocalmedia",
"-d",
@@ -891,7 +887,6 @@ def test_build_commands_executed(
"-m",
"sphinx",
"-T",
- "-E",
"-b",
"latex",
"-d",
@@ -911,7 +906,6 @@ def test_build_commands_executed(
"-m",
"sphinx",
"-T",
- "-E",
"-b",
"epub",
"-d",
@@ -1467,7 +1461,6 @@ def test_sphinx_normalized_language(self, load_yaml_config):
"-m",
"sphinx",
"-T",
- "-E",
"-W", # fail on warning flag
"--keep-going", # fail on warning flag
"-b",
@@ -1506,7 +1499,6 @@ def test_sphinx_fail_on_warning(self, load_yaml_config):
"-m",
"sphinx",
"-T",
- "-E",
"-W", # fail on warning flag
"--keep-going", # fail on warning flag
"-b",
@@ -1843,7 +1835,6 @@ def test_sphinx_builder(self, load_yaml_config, value, command):
"-m",
"sphinx",
"-T",
- "-E",
"-b",
command,
"-d",
| Drop `-E` from Sphinx call
A few years ago (see #6966), there was some effort to speed up subsequent builds with different builders by sharing the "doctree" directory.
A remnant of this is still visible since the Sphinx arguments contain `-d _build/doctrees`.
However, I recently noticed that the Sphinx command now also contains the `-E` flag, which destroys most of the savings expected from sharing doctrees.
If the `-E` flag is intentionally used for some reason, the `-d` argument should probably be removed to avoid confusion.
## Details
* Read the Docs project URL: https://readthedocs.org/projects/nbsphinx/
* Build URL (if applicable): https://readthedocs.org/projects/nbsphinx/builds/21799832/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/geier/
## Expected Result
Different builders re-use the doctrees, sources are only read for the first build, all subsequent builds are very quick.
## Actual Result
Each build reads all source files, not re-using previous doctrees.
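For reference, a minimal sketch of what the resulting invocation looks like once `-E` is dropped but the shared `-d` doctree cache is kept, mirroring how the builder assembles its arguments in the patch above (paths and options here are illustrative):
```python
build_command = [
    "python", "-m", "sphinx",
    "-T",                      # show full tracebacks on errors
    "-b", "html",              # builder; later pdf/epub runs reuse the same doctrees
    "-d", "_build/doctrees",   # shared doctree cache across builders
    "-D", "language=en",
    ".", "_build/html",
]
# Without "-E", Sphinx reuses the pickled environment in _build/doctrees,
# so the PDF and ePub builds that follow don't have to re-read every source file.
```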
| Thanks for reporting this. We should probably remove both since there is no shared state between each build anymore.
@mgeier Hrm... Thinking a little more about this. Since we are building the HTML and then the PDF, aren't these doctrees shared between those commands?
> Since we are building the HTML and then the PDF, aren't these toctrees shared between those commands?
I thought that was the intention a few years ago, and this is normally achieved by making sure to specify the same doctree directory with `-d`, which is the case here.
However, this is all obliterated by the `-E` flag, which means `don't use a saved environment, always read all files`.
So even though the doctree directory is technically shared and re-used, because of the `-E` flag all source files are still read and Sphinx's environment is re-built from scratch.
Thanks for the clarification here.
@stsewd @ericholscher what do you think is the correct move here?
* Should we remove both options and keep it working as it is now?
* Should we remove only `-E` and get the benefits of sharing the parsed doctree between HTML, ePUB and PDF?
Since the root directory is always clean, I think it makes sense to only remove `-E` which will speed up some projects. In particular, those with many files and building multiple versions.
+1 on removing the -E option
Yea, I think we don't need it anymore since we aren't reusing build directories across builds. | 2024-01-29T15:58:14 |
readthedocs/readthedocs.org | 11,074 | readthedocs__readthedocs.org-11074 | [
"10884"
] | dcd46559a9d9a5a654edede3a1edc91e8a1a30f0 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -46,21 +46,23 @@
"nodejs": {
"14": "14.20.1",
"16": "16.18.1",
- "18": "18.16.1", # LTS
+ "18": "18.16.1",
"19": "19.0.1",
- "20": "20.3.1",
+ "20": "20.11.0", # LTS
},
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
"1.64": "1.64.0",
"1.70": "1.70.0",
+ "1.75": "1.75.0",
},
"golang": {
"1.17": "1.17.13",
"1.18": "1.18.10",
"1.19": "1.19.10",
"1.20": "1.20.5",
+ "1.21": "1.21.6",
},
},
}
| Build: add support for Go 1.21
## What's the problem this feature will solve?
Some projects require Go 1.21 to build.
## Describe the solution you'd like
Go 1.21 is the current latest stable release and RTD should support it.
## Alternative solutions
N/A
## Additional context
https://go.dev/doc/devel/release
| Thanks for the suggestion. I'm adding this to the roadmap so I can add support once we can prioritize it. Are you currently unable to build your documentation due to this or is it OK for now to keep using 1.20?
Thanks for asking! We reverted to using Go 1.21 for the interim so it's not an urgent matter for now, thanks! | 2024-01-29T16:14:16 |
|
readthedocs/readthedocs.org | 11,075 | readthedocs__readthedocs.org-11075 | [
"10346"
] | dcd46559a9d9a5a654edede3a1edc91e8a1a30f0 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -50,6 +50,9 @@
"19": "19.0.1",
"20": "20.3.1",
},
+ "ruby": {
+ "3.3": "3.3.0",
+ },
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
| Build: support Ruby under `build.tools`
We should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.
Work required:
- [x] Update the documentation
- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images
- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)
- [x] Update `settings.py` to add this tool and version
- [x] Update config v2 to accept this value
- [x] Create a branch on `test-builds` for this use case
> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462
I note there are other people installing Julia as well, in a pretty awkward way. We should probably document how to install _any tool supported by `asdf`_ in an easy way. The process looks like:
```yaml
build:
commands:
- asdf plugin add <tool> <asdf plugin url>
- asdf install <tool> <version>
- asdf global <tool> <version>
```
Users should be able to do this _without_ our intervention. However, for those tools that need to be compiled or are heavy on network transfer, we should probably include them in our S3 cache.
FYI, storing this in S3 would save 379s; the 89s to compile the required modules for Jekyll would remain. I'd be fine with only a recent Ruby (3.2 or 3.1) being cached.
Note to future travelers: using the `build.commands` key as mentioned above will result in readthedocs' pre-defined process being entirely overridden.
If you just want to extend the build with additional steps, the following may be more appropriate:
```yaml
build:
jobs:
    post_system_dependencies:
- asdf plugin add <tool> <asdf plugin url>
- asdf install <tool> <version>
- asdf global <tool> <version>
``` | 2024-01-29T16:29:28 |
|
readthedocs/readthedocs.org | 11,127 | readthedocs__readthedocs.org-11127 | [
"11125"
] | 4e7e1f7819807df0d33adc9b37d951bfeabe7547 | diff --git a/readthedocs/projects/migrations/0115_add_addonsconfig_history.py b/readthedocs/projects/migrations/0115_add_addonsconfig_history.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/projects/migrations/0115_add_addonsconfig_history.py
@@ -0,0 +1,137 @@
+# Generated by Django 4.2.10 on 2024-02-19 15:21
+
+import django.db.models.deletion
+import django_extensions.db.fields
+import simple_history.models
+from django.conf import settings
+from django.db import migrations, models
+from django_safemigrate import Safe
+
+
+class Migration(migrations.Migration):
+ safe = Safe.before_deploy
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ("projects", "0114_set_timestamp_fields_as_no_null"),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name="HistoricalAddonsConfig",
+ fields=[
+ (
+ "id",
+ models.IntegerField(
+ auto_created=True, blank=True, db_index=True, verbose_name="ID"
+ ),
+ ),
+ (
+ "created",
+ django_extensions.db.fields.CreationDateTimeField(
+ auto_now_add=True, verbose_name="created"
+ ),
+ ),
+ (
+ "modified",
+ django_extensions.db.fields.ModificationDateTimeField(
+ auto_now=True, verbose_name="modified"
+ ),
+ ),
+ (
+ "extra_history_user_id",
+ models.IntegerField(
+ blank=True, db_index=True, null=True, verbose_name="ID"
+ ),
+ ),
+ (
+ "extra_history_user_username",
+ models.CharField(
+ db_index=True,
+ max_length=150,
+ null=True,
+ verbose_name="username",
+ ),
+ ),
+ (
+ "extra_history_ip",
+ models.CharField(
+ blank=True, max_length=250, null=True, verbose_name="IP address"
+ ),
+ ),
+ (
+ "extra_history_browser",
+ models.CharField(
+ blank=True,
+ max_length=250,
+ null=True,
+ verbose_name="Browser user-agent",
+ ),
+ ),
+ (
+ "enabled",
+ models.BooleanField(
+ default=True,
+ help_text="Enable/Disable all the addons on this project",
+ ),
+ ),
+ ("analytics_enabled", models.BooleanField(default=False)),
+ ("doc_diff_enabled", models.BooleanField(default=True)),
+ ("doc_diff_show_additions", models.BooleanField(default=True)),
+ ("doc_diff_show_deletions", models.BooleanField(default=True)),
+ (
+ "doc_diff_root_selector",
+ models.CharField(blank=True, max_length=128, null=True),
+ ),
+ ("external_version_warning_enabled", models.BooleanField(default=True)),
+ ("ethicalads_enabled", models.BooleanField(default=True)),
+ ("flyout_enabled", models.BooleanField(default=True)),
+ ("hotkeys_enabled", models.BooleanField(default=True)),
+ ("search_enabled", models.BooleanField(default=True)),
+ (
+ "search_default_filter",
+ models.CharField(blank=True, max_length=128, null=True),
+ ),
+ (
+ "stable_latest_version_warning_enabled",
+ models.BooleanField(default=True),
+ ),
+ ("history_id", models.AutoField(primary_key=True, serialize=False)),
+ ("history_date", models.DateTimeField()),
+ ("history_change_reason", models.CharField(max_length=100, null=True)),
+ (
+ "history_type",
+ models.CharField(
+ choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
+ max_length=1,
+ ),
+ ),
+ (
+ "history_user",
+ models.ForeignKey(
+ null=True,
+ on_delete=django.db.models.deletion.SET_NULL,
+ related_name="+",
+ to=settings.AUTH_USER_MODEL,
+ ),
+ ),
+ (
+ "project",
+ models.ForeignKey(
+ blank=True,
+ db_constraint=False,
+ null=True,
+ on_delete=django.db.models.deletion.DO_NOTHING,
+ related_name="+",
+ to="projects.project",
+ ),
+ ),
+ ],
+ options={
+ "verbose_name": "historical addons config",
+ "ordering": ("-history_date", "-history_id"),
+ "get_latest_by": "history_date",
+ },
+ bases=(simple_history.models.HistoricalChanges, models.Model),
+ ),
+ ]
diff --git a/readthedocs/projects/models.py b/readthedocs/projects/models.py
--- a/readthedocs/projects/models.py
+++ b/readthedocs/projects/models.py
@@ -101,7 +101,7 @@ class ProjectRelationship(models.Model):
on_delete=models.CASCADE,
)
alias = models.SlugField(
- _('Alias'),
+ _("Alias"),
max_length=255,
null=True,
blank=True,
@@ -111,7 +111,7 @@ class ProjectRelationship(models.Model):
objects = ChildRelatedProjectQuerySet.as_manager()
def __str__(self):
- return '{} -> {}'.format(self.parent, self.child)
+ return "{} -> {}".format(self.parent, self.child)
def save(self, *args, **kwargs):
if not self.alias:
@@ -146,6 +146,9 @@ class AddonsConfig(TimeStampedModel):
DOC_DIFF_DEFAULT_ROOT_SELECTOR = "[role=main]"
+ # Model history
+ history = ExtraHistoricalRecords()
+
project = models.OneToOneField(
"Project",
related_name="addons",
@@ -209,48 +212,52 @@ class Project(models.Model):
"""Project model."""
# Auto fields
- pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True, db_index=True)
- modified_date = models.DateTimeField(_('Modified date'), auto_now=True, db_index=True)
+ pub_date = models.DateTimeField(
+ _("Publication date"), auto_now_add=True, db_index=True
+ )
+ modified_date = models.DateTimeField(
+ _("Modified date"), auto_now=True, db_index=True
+ )
# Generally from conf.py
users = models.ManyToManyField(
User,
- verbose_name=_('User'),
- related_name='projects',
+ verbose_name=_("User"),
+ related_name="projects",
)
# A DNS label can contain up to 63 characters.
- name = models.CharField(_('Name'), max_length=63)
- slug = models.SlugField(_('Slug'), max_length=63, unique=True)
+ name = models.CharField(_("Name"), max_length=63)
+ slug = models.SlugField(_("Slug"), max_length=63, unique=True)
description = models.TextField(
- _('Description'),
+ _("Description"),
blank=True,
- help_text=_('Short description of this project'),
+ help_text=_("Short description of this project"),
)
repo = models.CharField(
- _('Repository URL'),
+ _("Repository URL"),
max_length=255,
validators=[validate_repository_url],
- help_text=_('Hosted documentation repository URL'),
+ help_text=_("Hosted documentation repository URL"),
db_index=True,
)
# NOTE: this field is going to be completely removed soon.
# We only accept Git for new repositories
repo_type = models.CharField(
- _('Repository type'),
+ _("Repository type"),
max_length=10,
choices=constants.REPO_CHOICES,
- default='git',
+ default="git",
)
project_url = models.URLField(
- _('Project homepage'),
+ _("Project homepage"),
blank=True,
- help_text=_('The project\'s homepage'),
+ help_text=_("The project's homepage"),
)
canonical_url = models.URLField(
- _('Canonical URL'),
+ _("Canonical URL"),
blank=True,
- help_text=_('URL that documentation is expected to serve from'),
+ help_text=_("URL that documentation is expected to serve from"),
)
versioning_scheme = models.CharField(
_("Versioning scheme"),
@@ -267,7 +274,7 @@ class Project(models.Model):
)
# TODO: this field is deprecated, use `versioning_scheme` instead.
single_version = models.BooleanField(
- _('Single version'),
+ _("Single version"),
default=False,
help_text=_(
"A single version site has no translations and only your "
@@ -277,15 +284,15 @@ class Project(models.Model):
),
)
default_version = models.CharField(
- _('Default version'),
+ _("Default version"),
max_length=255,
default=LATEST,
- help_text=_('The version of your project that / redirects to'),
+ help_text=_("The version of your project that / redirects to"),
)
# In default_branch, ``None`` means the backend will use the default branch
# cloned for each backend.
default_branch = models.CharField(
- _('Default branch'),
+ _("Default branch"),
max_length=255,
default=None,
null=True,
@@ -345,14 +352,14 @@ class Project(models.Model):
# External versions
external_builds_enabled = models.BooleanField(
- _('Build pull requests for this project'),
+ _("Build pull requests for this project"),
default=False,
help_text=_(
'More information in <a href="https://docs.readthedocs.io/page/guides/autobuild-docs-for-pull-requests.html">our docs</a>.' # noqa
),
)
external_builds_privacy_level = models.CharField(
- _('Privacy level of Pull Requests'),
+ _("Privacy level of Pull Requests"),
max_length=20,
# TODO: remove after migration
null=True,
@@ -364,79 +371,79 @@ class Project(models.Model):
)
# Project features
- cdn_enabled = models.BooleanField(_('CDN Enabled'), default=False)
+ cdn_enabled = models.BooleanField(_("CDN Enabled"), default=False)
analytics_code = models.CharField(
- _('Analytics code'),
+ _("Analytics code"),
max_length=50,
null=True,
blank=True,
help_text=_(
- 'Google Analytics Tracking ID '
- '(ex. <code>UA-22345342-1</code>). '
- 'This may slow down your page loads.',
+ "Google Analytics Tracking ID "
+ "(ex. <code>UA-22345342-1</code>). "
+ "This may slow down your page loads.",
),
)
analytics_disabled = models.BooleanField(
- _('Disable Analytics'),
+ _("Disable Analytics"),
default=False,
null=True,
help_text=_(
- 'Disable Google Analytics completely for this project '
- '(requires rebuilding documentation)',
+ "Disable Google Analytics completely for this project "
+ "(requires rebuilding documentation)",
),
)
container_image = models.CharField(
- _('Alternative container image'),
+ _("Alternative container image"),
max_length=64,
null=True,
blank=True,
)
container_mem_limit = models.CharField(
- _('Container memory limit'),
+ _("Container memory limit"),
max_length=10,
null=True,
blank=True,
help_text=_(
- 'Memory limit in Docker format '
- '-- example: <code>512m</code> or <code>1g</code>',
+ "Memory limit in Docker format "
+ "-- example: <code>512m</code> or <code>1g</code>",
),
)
container_time_limit = models.IntegerField(
- _('Container time limit in seconds'),
+ _("Container time limit in seconds"),
null=True,
blank=True,
)
build_queue = models.CharField(
- _('Alternate build queue id'),
+ _("Alternate build queue id"),
max_length=32,
null=True,
blank=True,
)
max_concurrent_builds = models.IntegerField(
- _('Maximum concurrent builds allowed for this project'),
+ _("Maximum concurrent builds allowed for this project"),
null=True,
blank=True,
)
allow_promos = models.BooleanField(
- _('Allow paid advertising'),
+ _("Allow paid advertising"),
default=True,
- help_text=_('If unchecked, users will still see community ads.'),
+ help_text=_("If unchecked, users will still see community ads."),
)
ad_free = models.BooleanField(
- _('Ad-free'),
+ _("Ad-free"),
default=False,
- help_text='If checked, do not show advertising for this project',
+ help_text="If checked, do not show advertising for this project",
)
is_spam = models.BooleanField(
- _('Is spam?'),
+ _("Is spam?"),
default=None,
null=True,
- help_text=_('Manually marked as (not) spam'),
+ help_text=_("Manually marked as (not) spam"),
)
show_version_warning = models.BooleanField(
- _('Show version warning'),
+ _("Show version warning"),
default=False,
- help_text=_('Show warning banner in non-stable nor latest versions.'),
+ help_text=_("Show warning banner in non-stable nor latest versions."),
)
readthedocs_yaml_path = models.CharField(
@@ -454,7 +461,7 @@ class Project(models.Model):
validators=[validate_build_config_file],
)
- featured = models.BooleanField(_('Featured'), default=False)
+ featured = models.BooleanField(_("Featured"), default=False)
skip = models.BooleanField(_("Skip (disable) building this project"), default=False)
@@ -472,19 +479,19 @@ class Project(models.Model):
)
privacy_level = models.CharField(
- _('Privacy Level'),
+ _("Privacy Level"),
max_length=20,
choices=constants.PRIVACY_CHOICES,
default=settings.DEFAULT_PRIVACY_LEVEL,
help_text=_(
- 'Should the project dashboard be public?',
+ "Should the project dashboard be public?",
),
)
# Subprojects
related_projects = models.ManyToManyField(
- 'self',
- verbose_name=_('Related projects'),
+ "self",
+ verbose_name=_("Related projects"),
blank=True,
symmetrical=False,
through=ProjectRelationship,
@@ -492,31 +499,31 @@ class Project(models.Model):
# Language bits
language = models.CharField(
- _('Language'),
+ _("Language"),
max_length=20,
- default='en',
+ default="en",
help_text=_(
- 'The language the project '
- 'documentation is rendered in. '
+ "The language the project "
+ "documentation is rendered in. "
"Note: this affects your project's URL.",
),
choices=constants.LANGUAGES,
)
programming_language = models.CharField(
- _('Programming Language'),
+ _("Programming Language"),
max_length=20,
- default='words',
+ default="words",
help_text=_(
- 'The primary programming language the project is written in.',
+ "The primary programming language the project is written in.",
),
choices=constants.PROGRAMMING_LANGUAGES,
blank=True,
)
# A subproject pointed at its main language, so it can be tracked
main_language_project = models.ForeignKey(
- 'self',
- related_name='translations',
+ "self",
+ related_name="translations",
on_delete=models.SET_NULL,
blank=True,
null=True,
@@ -524,11 +531,11 @@ class Project(models.Model):
has_valid_webhook = models.BooleanField(
default=False,
- help_text=_('This project has been built with a webhook'),
+ help_text=_("This project has been built with a webhook"),
)
has_valid_clone = models.BooleanField(
default=False,
- help_text=_('This project has been successfully cloned'),
+ help_text=_("This project has been successfully cloned"),
)
tags = TaggableManager(blank=True, ordering=["name"])
@@ -536,9 +543,9 @@ class Project(models.Model):
objects = ProjectQuerySet.as_manager()
remote_repository = models.ForeignKey(
- 'oauth.RemoteRepository',
+ "oauth.RemoteRepository",
on_delete=models.SET_NULL,
- related_name='projects',
+ related_name="projects",
null=True,
blank=True,
)
@@ -645,10 +652,10 @@ class Project(models.Model):
)
# Property used for storing the latest build for a project when prefetching
- LATEST_BUILD_CACHE = '_latest_build'
+ LATEST_BUILD_CACHE = "_latest_build"
class Meta:
- ordering = ('slug',)
+ ordering = ("slug",)
verbose_name = _("project")
def __str__(self):
@@ -702,7 +709,7 @@ def clean(self):
)
def get_absolute_url(self):
- return reverse('projects_detail', args=[self.slug])
+ return reverse("projects_detail", args=[self.slug])
def get_docs_url(self, version_slug=None, lang_slug=None, external=False):
"""
@@ -719,9 +726,9 @@ def get_docs_url(self, version_slug=None, lang_slug=None, external=False):
def get_builds_url(self):
return reverse(
- 'builds_project_list',
+ "builds_project_list",
kwargs={
- 'project_slug': self.slug,
+ "project_slug": self.slug,
},
)
@@ -732,18 +739,11 @@ def get_storage_paths(self):
:return: the path to an item in storage
(can be used with ``storage.url`` to get the URL).
"""
- storage_paths = [
- f'{type_}/{self.slug}'
- for type_ in MEDIA_TYPES
- ]
+ storage_paths = [f"{type_}/{self.slug}" for type_ in MEDIA_TYPES]
return storage_paths
def get_storage_path(
- self,
- type_,
- version_slug=LATEST,
- include_file=True,
- version_type=None
+ self, type_, version_slug=LATEST, include_file=True, version_type=None
):
"""
Get a path to a build artifact for use with Django's storage system.
@@ -764,7 +764,7 @@ def get_storage_path(
type_dir = type_
# Add `external/` prefix for external versions
if version_type == EXTERNAL:
- type_dir = f'{EXTERNAL}/{type_}'
+ type_dir = f"{EXTERNAL}/{type_}"
# Version slug may come from an unstrusted input,
# so we use join to avoid any path traversal.
@@ -772,8 +772,8 @@ def get_storage_path(
folder_path = build_media_storage.join(f"{type_dir}/{self.slug}", version_slug)
if include_file:
- extension = type_.replace('htmlzip', 'zip')
- return '{}/{}.{}'.format(
+ extension = type_.replace("htmlzip", "zip")
+ return "{}/{}.{}".format(
folder_path,
self.slug,
extension,
@@ -793,10 +793,10 @@ def get_production_media_url(self, type_, version_slug):
main_project = self.main_language_project or self
if main_project.is_subproject:
# docs.example.com/_/downloads/<alias>/<lang>/<ver>/pdf/
- path = f'//{domain}/{self.proxied_api_url}downloads/{main_project.alias}/{self.language}/{version_slug}/{type_}/' # noqa
+ path = f"//{domain}/{self.proxied_api_url}downloads/{main_project.alias}/{self.language}/{version_slug}/{type_}/" # noqa
else:
# docs.example.com/_/downloads/<lang>/<ver>/pdf/
- path = f'//{domain}/{self.proxied_api_url}downloads/{self.language}/{version_slug}/{type_}/' # noqa
+ path = f"//{domain}/{self.proxied_api_url}downloads/{self.language}/{version_slug}/{type_}/" # noqa
return path
@@ -811,7 +811,7 @@ def proxied_api_host(self):
custom_prefix = self.proxied_api_prefix
if custom_prefix:
return unsafe_join_url_path(custom_prefix, "/_")
- return '/_'
+ return "/_"
@property
def proxied_api_url(self):
@@ -820,7 +820,7 @@ def proxied_api_url(self):
It can't start with a /, but has to end with one.
"""
- return self.proxied_api_host.strip('/') + '/'
+ return self.proxied_api_host.strip("/") + "/"
@property
def proxied_static_path(self):
@@ -914,7 +914,7 @@ def get_downloads(self):
downloads = {}
default_version = self.get_default_version()
- for type_ in ('htmlzip', 'epub', 'pdf'):
+ for type_ in ("htmlzip", "epub", "pdf"):
downloads[type_] = self.get_production_media_url(
type_,
default_version,
@@ -927,8 +927,8 @@ def clean_repo(self):
# NOTE: this method is used only when the project is going to be clonned.
# It probably makes sense to do a data migrations and force "Import Project"
# form to validate it's an HTTPS URL when importing new ones
- if self.repo.startswith('http://github.com'):
- return self.repo.replace('http://github.com', 'https://github.com')
+ if self.repo.startswith("http://github.com"):
+ return self.repo.replace("http://github.com", "https://github.com")
return self.repo
# Doc PATH:
@@ -936,17 +936,17 @@ def clean_repo(self):
@property
def doc_path(self):
- return os.path.join(settings.DOCROOT, self.slug.replace('_', '-'))
+ return os.path.join(settings.DOCROOT, self.slug.replace("_", "-"))
def checkout_path(self, version=LATEST):
- return os.path.join(self.doc_path, 'checkouts', version)
+ return os.path.join(self.doc_path, "checkouts", version)
def full_doc_path(self, version=LATEST):
"""The path to the documentation root in the project."""
doc_base = self.checkout_path(version)
- for possible_path in ['docs', 'doc', 'Doc']:
- if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):
- return os.path.join(doc_base, '%s' % possible_path)
+ for possible_path in ["docs", "doc", "Doc"]:
+ if os.path.exists(os.path.join(doc_base, "%s" % possible_path)):
+ return os.path.join(doc_base, "%s" % possible_path)
# No docs directory, docs are at top-level.
return doc_base
@@ -968,20 +968,20 @@ def conf_file(self, version=LATEST):
)
if os.path.exists(conf_path):
- log.info('Inserting conf.py file path from model')
+ log.info("Inserting conf.py file path from model")
return conf_path
log.warning("Conf file specified on model doesn't exist")
- files = self.find('conf.py', version)
+ files = self.find("conf.py", version)
if not files:
- files = self.full_find('conf.py', version)
+ files = self.full_find("conf.py", version)
if len(files) == 1:
return files[0]
for filename in files:
# When multiples conf.py files, we look up the first one that
# contains the `doc` word in its path and return this one
- if filename.find('doc', 70) != -1:
+ if filename.find("doc", 70) != -1:
return filename
# If the project has more than one conf.py file but none of them have
@@ -1002,7 +1002,7 @@ def conf_dir(self, version=LATEST):
def has_good_build(self):
# Check if there is `_good_build` annotation in the Queryset.
# Used for Database optimization.
- if hasattr(self, '_good_build'):
+ if hasattr(self, "_good_build"):
return self._good_build
return self.builds(manager=INTERNAL).filter(success=True).exists()
@@ -1059,7 +1059,7 @@ def git_service_class(self):
service = service_cls
break
else:
- log.warning('There are no registered services in the application.')
+ log.warning("There are no registered services in the application.")
service = None
return service
@@ -1112,17 +1112,17 @@ def get_latest_build(self, finished=True):
return self._latest_build[0]
return None
- kwargs = {'type': 'html'}
+ kwargs = {"type": "html"}
if finished:
- kwargs['state'] = 'finished'
+ kwargs["state"] = "finished"
return self.builds(manager=INTERNAL).filter(**kwargs).first()
def active_versions(self):
from readthedocs.builds.models import Version
+
versions = Version.internal.public(project=self, only_active=True)
- return (
- versions.filter(built=True, active=True) |
- versions.filter(active=True, uploaded=True)
+ return versions.filter(built=True, active=True) | versions.filter(
+ active=True, uploaded=True
)
def ordered_active_versions(self, **kwargs):
@@ -1133,29 +1133,30 @@ def ordered_active_versions(self, **kwargs):
`Version.internal.public` queryset.
"""
from readthedocs.builds.models import Version
+
kwargs.update(
{
- 'project': self,
- 'only_active': True,
- 'only_built': True,
+ "project": self,
+ "only_active": True,
+ "only_built": True,
},
)
versions = (
Version.internal.public(**kwargs)
.select_related(
- 'project',
- 'project__main_language_project',
+ "project",
+ "project__main_language_project",
)
.prefetch_related(
Prefetch(
- 'project__superprojects',
- ProjectRelationship.objects.all().select_related('parent'),
- to_attr='_superprojects',
+ "project__superprojects",
+ ProjectRelationship.objects.all().select_related("parent"),
+ to_attr="_superprojects",
),
Prefetch(
- 'project__domains',
+ "project__domains",
Domain.objects.filter(canonical=True),
- to_attr='_canonical_domains',
+ to_attr="_canonical_domains",
),
)
)
@@ -1193,8 +1194,7 @@ def get_original_stable_version(self):
# Several tags can point to the same identifier.
# Return the stable one.
original_stable = determine_stable_version(
- self.versions(manager=INTERNAL)
- .filter(identifier=current_stable.identifier)
+ self.versions(manager=INTERNAL).filter(identifier=current_stable.identifier)
)
return original_stable
@@ -1278,11 +1278,11 @@ def update_stable_version(self):
return new_stable
else:
log.info(
- 'Creating new stable version: %(project)s:%(version)s',
+ "Creating new stable version: %(project)s:%(version)s",
{
- 'project': self.slug,
- 'version': new_stable.identifier,
- }
+ "project": self.slug,
+ "version": new_stable.identifier,
+ },
)
current_stable = self.versions.create_stable(
type=new_stable.type,
@@ -1292,10 +1292,10 @@ def update_stable_version(self):
def versions_from_branch_name(self, branch):
return (
- self.versions.filter(identifier=branch) |
- self.versions.filter(identifier='remotes/origin/%s' % branch) |
- self.versions.filter(identifier='origin/%s' % branch) |
- self.versions.filter(verbose_name=branch)
+ self.versions.filter(identifier=branch)
+ | self.versions.filter(identifier="remotes/origin/%s" % branch)
+ | self.versions.filter(identifier="origin/%s" % branch)
+ | self.versions.filter(verbose_name=branch)
)
def get_default_version(self):
@@ -1345,17 +1345,17 @@ def parent_relationship(self):
It returns ``None`` if this is a top level project.
"""
- if hasattr(self, '_superprojects'):
+ if hasattr(self, "_superprojects"):
# Cached parent project relationship
if self._superprojects:
return self._superprojects[0]
return None
- return self.superprojects.select_related('parent').first()
+ return self.superprojects.select_related("parent").first()
def get_canonical_custom_domain(self):
"""Get the canonical custom domain or None."""
- if hasattr(self, '_canonical_domains'):
+ if hasattr(self, "_canonical_domains"):
# Cached custom domains
if self._canonical_domains:
return self._canonical_domains[0]
@@ -1397,8 +1397,9 @@ def show_advertising(self):
if self.ad_free or self.gold_owners.exists():
return False
- if 'readthedocsext.spamfighting' in settings.INSTALLED_APPS:
+ if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
from readthedocsext.spamfighting.utils import is_show_ads_denied # noqa
+
return not is_show_ads_denied(self)
return True
@@ -1427,7 +1428,7 @@ def is_valid_as_superproject(self, error_class):
# Check the parent project is not a subproject already
if self.superprojects.exists():
raise error_class(
- _('Subproject nesting is not supported'),
+ _("Subproject nesting is not supported"),
)
def get_subproject_candidates(self, user):
@@ -1482,13 +1483,19 @@ class Meta:
proxy = True
def __init__(self, *args, **kwargs):
- self.features = kwargs.pop('features', [])
- environment_variables = kwargs.pop('environment_variables', {})
- ad_free = (not kwargs.pop('show_advertising', True))
+ self.features = kwargs.pop("features", [])
+ environment_variables = kwargs.pop("environment_variables", {})
+ ad_free = not kwargs.pop("show_advertising", True)
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
- for key in ['users', 'resource_uri', 'absolute_url', 'downloads',
- 'main_language_project', 'related_projects']:
+ for key in [
+ "users",
+ "resource_uri",
+ "absolute_url",
+ "downloads",
+ "main_language_project",
+ "related_projects",
+ ]:
try:
del kwargs[key]
except KeyError:
@@ -1523,9 +1530,9 @@ def show_advertising(self):
def environment_variables(self, *, public_only=True):
return {
- name: spec['value']
+ name: spec["value"]
for name, spec in self._environment_variables.items()
- if spec['public'] or not public_only
+ if spec["public"] or not public_only
}
@@ -1540,33 +1547,33 @@ class ImportedFile(models.Model):
project = models.ForeignKey(
Project,
- verbose_name=_('Project'),
- related_name='imported_files',
+ verbose_name=_("Project"),
+ related_name="imported_files",
on_delete=models.CASCADE,
)
version = models.ForeignKey(
- 'builds.Version',
- verbose_name=_('Version'),
- related_name='imported_files',
+ "builds.Version",
+ verbose_name=_("Version"),
+ related_name="imported_files",
null=True,
on_delete=models.CASCADE,
)
- name = models.CharField(_('Name'), max_length=255)
+ name = models.CharField(_("Name"), max_length=255)
# max_length is set to 4096 because linux has a maximum path length
# of 4096 characters for most filesystems (including EXT4).
# https://github.com/rtfd/readthedocs.org/issues/5061
- path = models.CharField(_('Path'), max_length=4096)
- commit = models.CharField(_('Commit'), max_length=255)
- build = models.IntegerField(_('Build id'), null=True)
- modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
+ path = models.CharField(_("Path"), max_length=4096)
+ commit = models.CharField(_("Commit"), max_length=255)
+ build = models.IntegerField(_("Build id"), null=True)
+ modified_date = models.DateTimeField(_("Modified date"), auto_now=True)
rank = models.IntegerField(
- _('Page search rank'),
+ _("Page search rank"),
default=0,
validators=[MinValueValidator(-10), MaxValueValidator(10)],
)
ignore = models.BooleanField(
- _('Ignore this file from operations like indexing'),
+ _("Ignore this file from operations like indexing"),
# default=False,
# TODO: remove after migration
null=True,
@@ -1580,7 +1587,7 @@ def get_absolute_url(self):
)
def __str__(self):
- return '{}: {}'.format(self.name, self.project)
+ return "{}: {}".format(self.name, self.project)
class HTMLFile(ImportedFile):
@@ -1612,19 +1619,19 @@ class Notification(TimeStampedModel):
# TODO: Overridden from TimeStampedModel just to allow null values,
# remove after deploy.
created = CreationDateTimeField(
- _('created'),
+ _("created"),
null=True,
blank=True,
)
modified = ModificationDateTimeField(
- _('modified'),
+ _("modified"),
null=True,
blank=True,
)
project = models.ForeignKey(
Project,
- related_name='%(class)s_notifications',
+ related_name="%(class)s_notifications",
on_delete=models.CASCADE,
)
objects = RelatedProjectQuerySet.as_manager()
@@ -1641,15 +1648,14 @@ def __str__(self):
class WebHookEvent(models.Model):
-
- BUILD_TRIGGERED = 'build:triggered'
- BUILD_PASSED = 'build:passed'
- BUILD_FAILED = 'build:failed'
+ BUILD_TRIGGERED = "build:triggered"
+ BUILD_PASSED = "build:passed"
+ BUILD_FAILED = "build:failed"
EVENTS = (
- (BUILD_TRIGGERED, _('Build triggered')),
- (BUILD_PASSED, _('Build passed')),
- (BUILD_FAILED, _('Build failed')),
+ (BUILD_TRIGGERED, _("Build triggered")),
+ (BUILD_PASSED, _("Build passed")),
+ (BUILD_FAILED, _("Build failed")),
)
name = models.CharField(
@@ -1663,27 +1669,26 @@ def __str__(self):
class WebHook(Notification):
-
url = models.URLField(
- _('URL'),
+ _("URL"),
max_length=600,
- help_text=_('URL to send the webhook to'),
+ help_text=_("URL to send the webhook to"),
)
secret = models.CharField(
- help_text=_('Secret used to sign the payload of the webhook'),
+ help_text=_("Secret used to sign the payload of the webhook"),
max_length=255,
blank=True,
null=True,
)
events = models.ManyToManyField(
WebHookEvent,
- related_name='webhooks',
- help_text=_('Events to subscribe'),
+ related_name="webhooks",
+ help_text=_("Events to subscribe"),
)
payload = models.TextField(
- _('JSON payload'),
+ _("JSON payload"),
help_text=_(
- 'JSON payload to send to the webhook. '
+ "JSON payload to send to the webhook. "
'Check <a href="https://docs.readthedocs.io/page/build-notifications.html#variable-substitutions-reference">the docs</a> for available substitutions.', # noqa
),
blank=True,
@@ -1691,8 +1696,8 @@ class WebHook(Notification):
max_length=25000,
)
exchanges = GenericRelation(
- 'integrations.HttpExchange',
- related_query_name='webhook',
+ "integrations.HttpExchange",
+ related_query_name="webhook",
)
def save(self, *args, **kwargs):
@@ -1712,49 +1717,54 @@ def get_payload(self, version, build, event):
project = version.project
organization = project.organizations.first()
- organization_name = ''
- organization_slug = ''
+ organization_name = ""
+ organization_slug = ""
if organization:
organization_slug = organization.slug
organization_name = organization.name
# Commit can be None, display an empty string instead.
- commit = build.commit or ''
- protocol = 'http' if settings.DEBUG else 'https'
- project_url = f'{protocol}://{settings.PRODUCTION_DOMAIN}{project.get_absolute_url()}'
- build_url = f'{protocol}://{settings.PRODUCTION_DOMAIN}{build.get_absolute_url()}'
+ commit = build.commit or ""
+ protocol = "http" if settings.DEBUG else "https"
+ project_url = (
+ f"{protocol}://{settings.PRODUCTION_DOMAIN}{project.get_absolute_url()}"
+ )
+ build_url = (
+ f"{protocol}://{settings.PRODUCTION_DOMAIN}{build.get_absolute_url()}"
+ )
build_docsurl = Resolver().resolve_version(project, version=version)
# Remove timezone and microseconds from the date,
# so it's more readable.
- start_date = build.date.replace(
- tzinfo=None,
- microsecond=0
- ).isoformat()
+ start_date = build.date.replace(tzinfo=None, microsecond=0).isoformat()
substitutions = {
- 'event': event,
- 'build.id': build.id,
- 'build.commit': commit,
- 'build.url': build_url,
- 'build.docs_url': build_docsurl,
- 'build.start_date': start_date,
- 'organization.name': organization_name,
- 'organization.slug': organization_slug,
- 'project.slug': project.slug,
- 'project.name': project.name,
- 'project.url': project_url,
- 'version.slug': version.slug,
- 'version.name': version.verbose_name,
+ "event": event,
+ "build.id": build.id,
+ "build.commit": commit,
+ "build.url": build_url,
+ "build.docs_url": build_docsurl,
+ "build.start_date": start_date,
+ "organization.name": organization_name,
+ "organization.slug": organization_slug,
+ "project.slug": project.slug,
+ "project.name": project.name,
+ "project.url": project_url,
+ "version.slug": version.slug,
+ "version.name": version.verbose_name,
}
payload = self.payload
# Small protection for DDoS.
max_substitutions = 99
for substitution, value in substitutions.items():
# Replace {{ foo }}.
- payload = payload.replace(f'{{{{ {substitution} }}}}', str(value), max_substitutions)
+ payload = payload.replace(
+ f"{{{{ {substitution} }}}}", str(value), max_substitutions
+ )
# Replace {{foo}}.
- payload = payload.replace(f'{{{{{substitution}}}}}', str(value), max_substitutions)
+ payload = payload.replace(
+ f"{{{{{substitution}}}}}", str(value), max_substitutions
+ )
return payload
def sign_payload(self, payload):
@@ -1767,7 +1777,7 @@ def sign_payload(self, payload):
return digest.hexdigest()
def __str__(self):
- return f'{self.project.slug} {self.url}'
+ return f"{self.project.slug} {self.url}"
class Domain(TimeStampedModel):
@@ -1777,18 +1787,18 @@ class Domain(TimeStampedModel):
# TODO: Overridden from TimeStampedModel just to allow null values,
# remove after deploy.
created = CreationDateTimeField(
- _('created'),
+ _("created"),
null=True,
blank=True,
)
project = models.ForeignKey(
Project,
- related_name='domains',
+ related_name="domains",
on_delete=models.CASCADE,
)
domain = models.CharField(
- _('Domain'),
+ _("Domain"),
unique=True,
max_length=255,
validators=[validate_domain_name, validate_no_ip],
@@ -1808,18 +1818,18 @@ class Domain(TimeStampedModel):
),
)
https = models.BooleanField(
- _('Use HTTPS'),
+ _("Use HTTPS"),
default=True,
- help_text=_('Always use HTTPS for this domain'),
+ help_text=_("Always use HTTPS for this domain"),
)
count = models.IntegerField(
default=0,
- help_text=_('Number of times this domain has been hit'),
+ help_text=_("Number of times this domain has been hit"),
)
# This is used in readthedocsext.
ssl_status = models.CharField(
- _('SSL certificate status'),
+ _("SSL certificate status"),
max_length=30,
choices=constants.SSL_STATUS_CHOICES,
default=constants.SSL_STATUS_UNKNOWN,
@@ -1841,24 +1851,26 @@ class Domain(TimeStampedModel):
# and hard to back out changes cleanly
hsts_max_age = models.PositiveIntegerField(
default=0,
- help_text=_('Set a custom max-age (eg. 31536000) for the HSTS header')
+ help_text=_("Set a custom max-age (eg. 31536000) for the HSTS header"),
)
hsts_include_subdomains = models.BooleanField(
default=False,
- help_text=_('If hsts_max_age > 0, set the includeSubDomains flag with the HSTS header')
+ help_text=_(
+ "If hsts_max_age > 0, set the includeSubDomains flag with the HSTS header"
+ ),
)
hsts_preload = models.BooleanField(
default=False,
- help_text=_('If hsts_max_age > 0, set the preload flag with the HSTS header')
+ help_text=_("If hsts_max_age > 0, set the preload flag with the HSTS header"),
)
objects = DomainQueryset.as_manager()
class Meta:
- ordering = ('-canonical', '-machine', 'domain')
+ ordering = ("-canonical", "-machine", "domain")
def __str__(self):
- return '{domain} pointed at {project}'.format(
+ return "{domain} pointed at {project}".format(
domain=self.domain,
project=self.project.name,
)
@@ -1918,7 +1930,7 @@ class HTTPHeader(TimeStampedModel, models.Model):
domain = models.ForeignKey(
Domain,
- related_name='http_headers',
+ related_name="http_headers",
on_delete=models.CASCADE,
)
name = models.CharField(
@@ -1927,7 +1939,7 @@ class HTTPHeader(TimeStampedModel, models.Model):
)
value = models.CharField(max_length=4096)
only_if_secure_request = models.BooleanField(
- help_text='Only set this header if the request is secure (HTTPS)',
+ help_text="Only set this header if the request is secure (HTTPS)",
)
def __str__(self):
@@ -1963,21 +1975,21 @@ def add_features(sender, **kwargs):
ALLOW_VERSION_WARNING_BANNER = "allow_version_warning_banner"
# Versions sync related features
- SKIP_SYNC_TAGS = 'skip_sync_tags'
- SKIP_SYNC_BRANCHES = 'skip_sync_branches'
- SKIP_SYNC_VERSIONS = 'skip_sync_versions'
+ SKIP_SYNC_TAGS = "skip_sync_tags"
+ SKIP_SYNC_BRANCHES = "skip_sync_branches"
+ SKIP_SYNC_VERSIONS = "skip_sync_versions"
# Dependencies related features
- PIP_ALWAYS_UPGRADE = 'pip_always_upgrade'
- USE_NEW_PIP_RESOLVER = 'use_new_pip_resolver'
- DONT_INSTALL_LATEST_PIP = 'dont_install_latest_pip'
- USE_SPHINX_RTD_EXT_LATEST = 'rtd_sphinx_ext_latest'
+ PIP_ALWAYS_UPGRADE = "pip_always_upgrade"
+ USE_NEW_PIP_RESOLVER = "use_new_pip_resolver"
+ DONT_INSTALL_LATEST_PIP = "dont_install_latest_pip"
+ USE_SPHINX_RTD_EXT_LATEST = "rtd_sphinx_ext_latest"
INSTALL_LATEST_CORE_REQUIREMENTS = "install_latest_core_requirements"
# Search related features
- DISABLE_SERVER_SIDE_SEARCH = 'disable_server_side_search'
- ENABLE_MKDOCS_SERVER_SIDE_SEARCH = 'enable_mkdocs_server_side_search'
- DEFAULT_TO_FUZZY_SEARCH = 'default_to_fuzzy_search'
+ DISABLE_SERVER_SIDE_SEARCH = "disable_server_side_search"
+ ENABLE_MKDOCS_SERVER_SIDE_SEARCH = "enable_mkdocs_server_side_search"
+ DEFAULT_TO_FUZZY_SEARCH = "default_to_fuzzy_search"
# Build related features
SCALE_IN_PROTECTION = "scale_in_prtection"
@@ -2024,7 +2036,6 @@ def add_features(sender, **kwargs):
ALLOW_VERSION_WARNING_BANNER,
_("Dashboard: Allow project to use the version warning banner."),
),
-
# Versions sync related features
(
SKIP_SYNC_BRANCHES,
@@ -2038,7 +2049,6 @@ def add_features(sender, **kwargs):
SKIP_SYNC_VERSIONS,
_("Webhook: Skip sync versions task"),
),
-
# Dependencies related features
(PIP_ALWAYS_UPGRADE, _("Build: Always run pip install --upgrade")),
(USE_NEW_PIP_RESOLVER, _("Build: Use new pip resolver")),
@@ -2056,7 +2066,6 @@ def add_features(sender, **kwargs):
"Build: Install all the latest versions of Read the Docs core requirements"
),
),
-
# Search related features.
(
DISABLE_SERVER_SIDE_SEARCH,
@@ -2086,29 +2095,31 @@ def add_features(sender, **kwargs):
# Feature is not implemented as a ChoiceField, as we don't want validation
# at the database level on this field. Arbitrary values are allowed here.
feature_id = models.CharField(
- _('Feature identifier'),
+ _("Feature identifier"),
max_length=255,
unique=True,
)
add_date = models.DateTimeField(
- _('Date feature was added'),
+ _("Date feature was added"),
auto_now_add=True,
)
# TODO: rename this field to `past_default_true` and follow this steps when deploying
# https://github.com/readthedocs/readthedocs.org/pull/7524#issuecomment-703663724
default_true = models.BooleanField(
- _('Default all past projects to True'),
+ _("Default all past projects to True"),
default=False,
)
future_default_true = models.BooleanField(
- _('Default all future projects to True'),
+ _("Default all future projects to True"),
default=False,
)
objects = FeatureQuerySet.as_manager()
def __str__(self):
- return '{} feature'.format(self.get_feature_display(),)
+ return "{} feature".format(
+ self.get_feature_display(),
+ )
def get_feature_display(self):
"""
@@ -2123,22 +2134,22 @@ def get_feature_display(self):
class EnvironmentVariable(TimeStampedModel, models.Model):
name = models.CharField(
max_length=128,
- help_text=_('Name of the environment variable'),
+ help_text=_("Name of the environment variable"),
)
value = models.CharField(
max_length=2048,
- help_text=_('Value of the environment variable'),
+ help_text=_("Value of the environment variable"),
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
- help_text=_('Project where this variable will be used'),
+ help_text=_("Project where this variable will be used"),
)
public = models.BooleanField(
- _('Public'),
+ _("Public"),
default=False,
null=True,
- help_text=_('Expose this environment variable in PR builds?'),
+ help_text=_("Expose this environment variable in PR builds?"),
)
objects = RelatedProjectQuerySet.as_manager()
| diff --git a/readthedocs/proxito/tests/test_hosting.py b/readthedocs/proxito/tests/test_hosting.py
--- a/readthedocs/proxito/tests/test_hosting.py
+++ b/readthedocs/proxito/tests/test_hosting.py
@@ -701,7 +701,7 @@ def test_number_of_queries_project_version_slug(self):
active=True,
)
- with self.assertNumQueries(20):
+ with self.assertNumQueries(21):
r = self.client.get(
reverse("proxito_readthedocs_docs_addons"),
{
@@ -730,7 +730,7 @@ def test_number_of_queries_url(self):
active=True,
)
- with self.assertNumQueries(20):
+ with self.assertNumQueries(21):
r = self.client.get(
reverse("proxito_readthedocs_docs_addons"),
{
@@ -766,7 +766,7 @@ def test_number_of_queries_url_subproject(self):
active=True,
)
- with self.assertNumQueries(24):
+ with self.assertNumQueries(25):
r = self.client.get(
reverse("proxito_readthedocs_docs_addons"),
{
@@ -792,7 +792,7 @@ def test_number_of_queries_url_translations(self):
language=language,
)
- with self.assertNumQueries(24):
+ with self.assertNumQueries(25):
r = self.client.get(
reverse("proxito_readthedocs_docs_addons"),
{
| Addons: add historical models to `AddonsConfig`
It would be good to add historical models to the addons configuration so we can understand which features a project has enabled, and also track them as a time series to make better decisions about feature usage.
I haven't used this pattern, but I suppose we have everything in place already and it shouldn't be hard to implement, right? cc @stsewd
| You only need to add this manager to the model https://github.com/readthedocs/readthedocs.org/blob/f283500df8267ec75d06928a56df03cbc8c3e5c8/readthedocs/core/models.py#L63-L63 | 2024-02-19T15:25:25 |
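To make the hint concrete, here is a minimal sketch of what attaching that manager could look like. The import path and the example field are assumptions for illustration, not the real model definition:

```python
# Sketch only: the import path and the example field are assumptions.
from django.db import models
from django_extensions.db.models import TimeStampedModel

from readthedocs.core.history import ExtraHistoricalRecords  # assumed location of the manager


class AddonsConfig(TimeStampedModel):
    # The real fields live in readthedocs/projects/models.py; this one is illustrative.
    enabled = models.BooleanField(default=True)

    # The one-line change the hint points at: every save now creates a
    # HistoricalAddonsConfig row that can be queried as a time series later.
    history = ExtraHistoricalRecords()
```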
readthedocs/readthedocs.org | 11,130 | readthedocs__readthedocs.org-11130 | [
"11118"
] | a096bd6e96681f21f314e4c7e19271989157a86e | diff --git a/readthedocs/api/v3/serializers.py b/readthedocs/api/v3/serializers.py
--- a/readthedocs/api/v3/serializers.py
+++ b/readthedocs/api/v3/serializers.py
@@ -66,50 +66,7 @@ class NotificationLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
def get__self(self, obj):
- content_type_name = obj.attached_to_content_type.name
- if content_type_name == "user":
- url = "users-notifications-detail"
- path = reverse(
- url,
- kwargs={
- "notification_pk": obj.pk,
- "parent_lookup_user__username": obj.attached_to.username,
- },
- )
-
- elif content_type_name == "build":
- url = "projects-builds-notifications-detail"
- project_slug = obj.attached_to.project.slug
- path = reverse(
- url,
- kwargs={
- "notification_pk": obj.pk,
- "parent_lookup_project__slug": project_slug,
- "parent_lookup_build__id": obj.attached_to_id,
- },
- )
-
- elif content_type_name == "project":
- url = "projects-notifications-detail"
- project_slug = obj.attached_to.slug
- path = reverse(
- url,
- kwargs={
- "notification_pk": obj.pk,
- "parent_lookup_project__slug": project_slug,
- },
- )
-
- elif content_type_name == "organization":
- url = "organizations-notifications-detail"
- path = reverse(
- url,
- kwargs={
- "notification_pk": obj.pk,
- "parent_lookup_organization__slug": obj.attached_to.slug,
- },
- )
-
+ path = obj.get_absolute_url()
return self._absolute_url(path)
diff --git a/readthedocs/notifications/models.py b/readthedocs/notifications/models.py
--- a/readthedocs/notifications/models.py
+++ b/readthedocs/notifications/models.py
@@ -4,10 +4,10 @@
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
+from django.urls import reverse
from django.utils.translation import gettext_noop as _
from django_extensions.db.models import TimeStampedModel
-
from .constants import CANCELLED, DISMISSED, READ, UNREAD, WARNING
from .messages import Message, registry
from .querysets import NotificationQuerySet
@@ -93,3 +93,49 @@ def get_message(self):
)
return message
+
+ def get_absolute_url(self):
+ content_type_name = self.attached_to_content_type.name
+ if content_type_name == "user":
+ url = "users-notifications-detail"
+ path = reverse(
+ url,
+ kwargs={
+ "notification_pk": self.pk,
+ "parent_lookup_user__username": self.attached_to.username,
+ },
+ )
+
+ elif content_type_name == "build":
+ url = "projects-builds-notifications-detail"
+ project_slug = self.attached_to.project.slug
+ path = reverse(
+ url,
+ kwargs={
+ "notification_pk": self.pk,
+ "parent_lookup_project__slug": project_slug,
+ "parent_lookup_build__id": self.attached_to_id,
+ },
+ )
+
+ elif content_type_name == "project":
+ url = "projects-notifications-detail"
+ project_slug = self.attached_to.slug
+ path = reverse(
+ url,
+ kwargs={
+ "notification_pk": self.pk,
+ "parent_lookup_project__slug": project_slug,
+ },
+ )
+
+ elif content_type_name == "organization":
+ url = "organizations-notifications-detail"
+ path = reverse(
+ url,
+ kwargs={
+ "notification_pk": self.pk,
+ "parent_lookup_organization__slug": self.attached_to.slug,
+ },
+ )
+ return path
| Notifications: ability to dismiss a notification
We are currently showing all the `read` and `unread` notifications, but we are not allowing users to dismiss them. So, we are always accumulating them.
We should bring back the small `X` next to each notification, which calls APIv3 to dismiss that notification.
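Roughly what the dashboard would send when the `X` is clicked — a sketch against the user-scoped endpoint from the serializer change above; the `PATCH` verb and the `{"state": "dismissed"}` payload are assumptions:

```python
# Hypothetical dismiss call; endpoint shape follows users-notifications-detail,
# but the verb and payload here are assumptions.
import requests

response = requests.patch(
    "https://readthedocs.org/api/v3/users/<username>/notifications/<notification-id>/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "dismissed"},
    timeout=10,
)
response.raise_for_status()
```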
| 2024-02-20T17:59:15 |
||
readthedocs/readthedocs.org | 11,153 | readthedocs__readthedocs.org-11153 | [
"11146"
] | 23bf20c9b28a053a83d329bfb2294e1191c13800 | diff --git a/readthedocs/builds/views.py b/readthedocs/builds/views.py
--- a/readthedocs/builds/views.py
+++ b/readthedocs/builds/views.py
@@ -168,6 +168,7 @@ def get_context_data(self, **kwargs):
context["project"] = self.project
build = self.get_object()
+ context["notifications"] = build.notifications.all()
if not build.notifications.filter(
message_id=BuildAppError.GENERIC_WITH_BUILD_ID
@@ -212,5 +213,5 @@ def get_context_data(self, **kwargs):
issue_url = scheme.format(**scheme_dict)
issue_url = urlparse(issue_url).geturl()
context["issue_url"] = issue_url
- context["notifications"] = build.notifications.all()
+
return context
| Notifications: always show notifications on build details' page
It seems that in https://github.com/readthedocs/readthedocs.org/pull/11117 we introduced a bug where build notifications are only shown to users that have permissions on that particular project --which is wrong. Since the project is public, the notifications should be shown to all users with access to the build details page.
| I've recreated the issue with a minimal install at:
https://github.com/taylorhummon/silently-failing-build
See:
https://readthedocs.org/projects/silently-failing-build/builds/23550653/ | 2024-02-26T11:22:35 |
|
readthedocs/readthedocs.org | 11,155 | readthedocs__readthedocs.org-11155 | [
"11055"
] | 23bf20c9b28a053a83d329bfb2294e1191c13800 | diff --git a/readthedocs/api/v3/serializers.py b/readthedocs/api/v3/serializers.py
--- a/readthedocs/api/v3/serializers.py
+++ b/readthedocs/api/v3/serializers.py
@@ -462,6 +462,7 @@ class ProjectLinksSerializer(BaseLinksSerializer):
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
+ notifications = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse("projects-detail", kwargs={"project_slug": obj.slug})
@@ -530,6 +531,15 @@ def get_translations(self, obj):
)
return self._absolute_url(path)
+ def get_notifications(self, obj):
+ path = reverse(
+ "projects-notifications-list",
+ kwargs={
+ "parent_lookup_project__slug": obj.slug,
+ },
+ )
+ return self._absolute_url(path)
+
class ProjectCreateSerializerBase(TaggitSerializer, FlexFieldsModelSerializer):
| diff --git a/readthedocs/api/v3/tests/responses/projects-detail.json b/readthedocs/api/v3/tests/responses/projects-detail.json
--- a/readthedocs/api/v3/tests/responses/projects-detail.json
+++ b/readthedocs/api/v3/tests/responses/projects-detail.json
@@ -71,6 +71,7 @@
"_self": "https://readthedocs.org/api/v3/projects/project/",
"builds": "https://readthedocs.org/api/v3/projects/project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/project/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-list.json b/readthedocs/api/v3/tests/responses/projects-list.json
--- a/readthedocs/api/v3/tests/responses/projects-list.json
+++ b/readthedocs/api/v3/tests/responses/projects-list.json
@@ -47,6 +47,7 @@
"versions": "https://readthedocs.org/api/v3/projects/project/versions/",
"builds": "https://readthedocs.org/api/v3/projects/project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/project/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-list_POST.json b/readthedocs/api/v3/tests/responses/projects-list_POST.json
--- a/readthedocs/api/v3/tests/responses/projects-list_POST.json
+++ b/readthedocs/api/v3/tests/responses/projects-list_POST.json
@@ -3,6 +3,7 @@
"_self": "https://readthedocs.org/api/v3/projects/test-project/",
"builds": "https://readthedocs.org/api/v3/projects/test-project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/test-project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/test-project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/test-project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/test-project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/test-project/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-subprojects-detail.json b/readthedocs/api/v3/tests/responses/projects-subprojects-detail.json
--- a/readthedocs/api/v3/tests/responses/projects-subprojects-detail.json
+++ b/readthedocs/api/v3/tests/responses/projects-subprojects-detail.json
@@ -9,6 +9,7 @@
"_self": "https://readthedocs.org/api/v3/projects/subproject/",
"builds": "https://readthedocs.org/api/v3/projects/subproject/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/subproject/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/subproject/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/subproject/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/subproject/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/subproject/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-subprojects-list.json b/readthedocs/api/v3/tests/responses/projects-subprojects-list.json
--- a/readthedocs/api/v3/tests/responses/projects-subprojects-list.json
+++ b/readthedocs/api/v3/tests/responses/projects-subprojects-list.json
@@ -14,6 +14,7 @@
"_self": "https://readthedocs.org/api/v3/projects/subproject/",
"builds": "https://readthedocs.org/api/v3/projects/subproject/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/subproject/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/subproject/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/subproject/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/subproject/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/subproject/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-subprojects-list_POST.json b/readthedocs/api/v3/tests/responses/projects-subprojects-list_POST.json
--- a/readthedocs/api/v3/tests/responses/projects-subprojects-list_POST.json
+++ b/readthedocs/api/v3/tests/responses/projects-subprojects-list_POST.json
@@ -9,6 +9,7 @@
"_self": "https://readthedocs.org/api/v3/projects/new-project/",
"builds": "https://readthedocs.org/api/v3/projects/new-project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/new-project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/new-project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/new-project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/new-project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/new-project/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-superproject.json b/readthedocs/api/v3/tests/responses/projects-superproject.json
--- a/readthedocs/api/v3/tests/responses/projects-superproject.json
+++ b/readthedocs/api/v3/tests/responses/projects-superproject.json
@@ -11,6 +11,7 @@
"_self": "https://readthedocs.org/api/v3/projects/project/",
"builds": "https://readthedocs.org/api/v3/projects/project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/project/superproject/",
diff --git a/readthedocs/api/v3/tests/responses/projects-versions-builds-list_POST.json b/readthedocs/api/v3/tests/responses/projects-versions-builds-list_POST.json
--- a/readthedocs/api/v3/tests/responses/projects-versions-builds-list_POST.json
+++ b/readthedocs/api/v3/tests/responses/projects-versions-builds-list_POST.json
@@ -39,6 +39,7 @@
"builds": "https://readthedocs.org/api/v3/projects/project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/project/environmentvariables/",
"redirects": "https://readthedocs.org/api/v3/projects/project/redirects/",
+ "notifications": "https://readthedocs.org/api/v3/projects/project/notifications/",
"subprojects": "https://readthedocs.org/api/v3/projects/project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/project/superproject/",
"translations": "https://readthedocs.org/api/v3/projects/project/translations/",
diff --git a/readthedocs/api/v3/tests/responses/remoterepositories-list.json b/readthedocs/api/v3/tests/responses/remoterepositories-list.json
--- a/readthedocs/api/v3/tests/responses/remoterepositories-list.json
+++ b/readthedocs/api/v3/tests/responses/remoterepositories-list.json
@@ -21,6 +21,7 @@
"_self": "https://readthedocs.org/api/v3/projects/project/",
"builds": "https://readthedocs.org/api/v3/projects/project/builds/",
"environmentvariables": "https://readthedocs.org/api/v3/projects/project/environmentvariables/",
+ "notifications": "https://readthedocs.org/api/v3/projects/project/notifications/",
"redirects": "https://readthedocs.org/api/v3/projects/project/redirects/",
"subprojects": "https://readthedocs.org/api/v3/projects/project/subprojects/",
"superproject": "https://readthedocs.org/api/v3/projects/project/superproject/",
| APIv3: `_links.notifications` is not returned for `Project`
We should update this serializer https://github.com/readthedocs/readthedocs.org/blob/7a58acb50b964bc60f19ea10a1e5135737d77d84/readthedocs/api/v3/serializers.py#L498-L508 to return `notifications`.
| 2024-02-26T15:40:25 |
|
readthedocs/readthedocs.org | 11,164 | readthedocs__readthedocs.org-11164 | [
"11163"
] | 90e2d486d956d7926fee48735d7b1c6ecf1b442d | diff --git a/readthedocs/core/forms.py b/readthedocs/core/forms.py
--- a/readthedocs/core/forms.py
+++ b/readthedocs/core/forms.py
@@ -1,8 +1,6 @@
"""Forms for core app."""
import structlog
-from crispy_forms.helper import FormHelper
-from crispy_forms.layout import Fieldset, Layout, Submit
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import NON_FIELD_ERRORS
@@ -23,15 +21,7 @@ class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
# Don't allow users edit someone else's user page
- profile_fields = ["first_name", "last_name", "homepage"]
- optout_email_fields = [
- "optout_email_config_file_deprecation",
- "optout_email_build_image_deprecation",
- ]
- fields = (
- *profile_fields,
- *optout_email_fields,
- )
+ fields = ["first_name", "last_name", "homepage"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -41,20 +31,6 @@ def __init__(self, *args, **kwargs):
except AttributeError:
pass
- self.helper = FormHelper()
- field_sets = [
- Fieldset(
- _("User settings"),
- *self.Meta.profile_fields,
- ),
- Fieldset(
- _("Email settings"),
- *self.Meta.optout_email_fields,
- ),
- ]
- self.helper.layout = Layout(*field_sets)
- self.helper.add_input(Submit("save", _("Save")))
-
def save(self, commit=True):
first_name = self.cleaned_data.pop("first_name", None)
last_name = self.cleaned_data.pop("last_name", None)
diff --git a/readthedocs/core/migrations/0015_remove_email_options.py b/readthedocs/core/migrations/0015_remove_email_options.py
new file mode 100644
--- /dev/null
+++ b/readthedocs/core/migrations/0015_remove_email_options.py
@@ -0,0 +1,31 @@
+# Generated by Django 4.2.10 on 2024-02-27 18:55
+
+from django.db import migrations
+from django_safemigrate import Safe
+
+
+class Migration(migrations.Migration):
+ safe = Safe.after_deploy
+
+ dependencies = [
+ ("core", "0014_optout_email_build_image_deprecation"),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name="historicaluserprofile",
+ name="optout_email_build_image_deprecation",
+ ),
+ migrations.RemoveField(
+ model_name="historicaluserprofile",
+ name="optout_email_config_file_deprecation",
+ ),
+ migrations.RemoveField(
+ model_name="userprofile",
+ name="optout_email_build_image_deprecation",
+ ),
+ migrations.RemoveField(
+ model_name="userprofile",
+ name="optout_email_config_file_deprecation",
+ ),
+ ]
diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py
--- a/readthedocs/core/models.py
+++ b/readthedocs/core/models.py
@@ -43,22 +43,6 @@ class UserProfile(TimeStampedModel):
whitelisted = models.BooleanField(_("Whitelisted"), default=False)
banned = models.BooleanField(_("Banned"), default=False)
- # Opt-out on emails
- # NOTE: this is a temporary field that we can remove after September 25, 2023
- # See https://blog.readthedocs.com/migrate-configuration-v2/
- optout_email_config_file_deprecation = models.BooleanField(
- _("Opt-out from email about 'Config file deprecation'"),
- default=False,
- null=True,
- )
- # NOTE: this is a temporary field that we can remove after October 16, 2023
- # See https://blog.readthedocs.com/use-build-os-config/
- optout_email_build_image_deprecation = models.BooleanField(
- _("Opt-out from email about '\"build.image\" config key deprecation'"),
- default=False,
- null=True,
- )
-
# Model history
history = ExtraHistoricalRecords()
| Notifications: remove unused email account settings
Noticed these are still tucked away in the user profile form:

They are both expired deprecations and we talked about moving these to a dedicated form anyways.
| 2024-02-27T18:58:04 |
||
readthedocs/readthedocs.org | 11,171 | readthedocs__readthedocs.org-11171 | [
"11170"
] | d8a9d1a4ef68308f23d212ac6a8f81c34e1cbb97 | diff --git a/readthedocs/projects/tasks/builds.py b/readthedocs/projects/tasks/builds.py
--- a/readthedocs/projects/tasks/builds.py
+++ b/readthedocs/projects/tasks/builds.py
@@ -24,6 +24,7 @@
ARTIFACT_TYPES_WITHOUT_MULTIPLE_FILES_SUPPORT,
BUILD_FINAL_STATES,
BUILD_STATE_BUILDING,
+ BUILD_STATE_CANCELLED,
BUILD_STATE_CLONING,
BUILD_STATE_FINISHED,
BUILD_STATE_INSTALLING,
@@ -136,7 +137,7 @@ class SyncRepositoryTask(SyncRepositoryMixin, Task):
in our database.
"""
- name = __name__ + '.sync_repository_task'
+ name = __name__ + ".sync_repository_task"
max_retries = 5
default_retry_delay = 7 * 60
throws = (
@@ -145,7 +146,7 @@ class SyncRepositoryTask(SyncRepositoryMixin, Task):
)
def before_start(self, task_id, args, kwargs):
- log.info('Running task.', name=self.name)
+ log.info("Running task.", name=self.name)
# Create the object to store all the task-related data
self.data = TaskData()
@@ -168,7 +169,7 @@ def before_start(self, task_id, args, kwargs):
# Also note there are builds that are triggered without a commit
# because they just build the latest commit for that version
- self.data.build_commit = kwargs.get('build_commit')
+ self.data.build_commit = kwargs.get("build_commit")
log.bind(
project_slug=self.data.project.slug,
@@ -179,7 +180,7 @@ def on_failure(self, exc, task_id, args, kwargs, einfo):
# Do not log as error handled exceptions
if isinstance(exc, RepositoryError):
log.warning(
- 'There was an error with the repository.',
+ "There was an error with the repository.",
)
elif isinstance(exc, SyncRepositoryLocked):
log.warning(
@@ -274,10 +275,8 @@ class UpdateDocsTask(SyncRepositoryMixin, Task):
build all the documentation formats and upload them to the storage.
"""
- name = __name__ + '.update_docs_task'
- autoretry_for = (
- BuildMaxConcurrencyError,
- )
+ name = __name__ + ".update_docs_task"
+ autoretry_for = (BuildMaxConcurrencyError,)
max_retries = settings.RTD_BUILDS_MAX_RETRIES
default_retry_delay = settings.RTD_BUILDS_RETRY_DELAY
retry_backoff = False
@@ -320,10 +319,12 @@ class UpdateDocsTask(SyncRepositoryMixin, Task):
def _setup_sigterm(self):
def sigterm_received(*args, **kwargs):
- log.warning('SIGTERM received. Waiting for build to stop gracefully after it finishes.')
+ log.warning(
+ "SIGTERM received. Waiting for build to stop gracefully after it finishes."
+ )
def sigint_received(*args, **kwargs):
- log.warning('SIGINT received. Canceling the build running.')
+ log.warning("SIGINT received. Canceling the build running.")
# Only allow to cancel the build if it's not already uploading the files.
# This is to protect our users to end up with half of the documentation uploaded.
@@ -347,12 +348,12 @@ def _check_concurrency_limit(self):
)
concurrency_limit_reached = response.get("limit_reached", False)
max_concurrent_builds = response.get(
- 'max_concurrent',
+ "max_concurrent",
settings.RTD_MAX_CONCURRENT_BUILDS,
)
except Exception:
log.exception(
- 'Error while hitting/parsing API for concurrent limit checks from builder.',
+ "Error while hitting/parsing API for concurrent limit checks from builder.",
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
)
@@ -375,7 +376,7 @@ def _check_concurrency_limit(self):
def _check_project_disabled(self):
if self.data.project.skip:
- log.warning('Project build skipped.')
+ log.warning("Project build skipped.")
raise BuildAppError(BuildAppError.BUILDS_DISABLED)
def before_start(self, task_id, args, kwargs):
@@ -403,13 +404,13 @@ def before_start(self, task_id, args, kwargs):
self.data.project = self.data.version.project
# Save the builder instance's name into the build object
- self.data.build['builder'] = socket.gethostname()
+ self.data.build["builder"] = socket.gethostname()
# Reset any previous build error reported to the user
- self.data.build['error'] = ''
+ self.data.build["error"] = ""
# Also note there are builds that are triggered without a commit
# because they just build the latest commit for that version
- self.data.build_commit = kwargs.get('build_commit')
+ self.data.build_commit = kwargs.get("build_commit")
self.data.build_director = BuildDirector(
data=self.data,
@@ -417,7 +418,7 @@ def before_start(self, task_id, args, kwargs):
log.bind(
# NOTE: ``self.data.build`` is just a regular dict, not an APIBuild :'(
- builder=self.data.build['builder'],
+ builder=self.data.build["builder"],
commit=self.data.build_commit,
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
@@ -470,7 +471,7 @@ def on_failure(self, exc, task_id, args, kwargs, einfo):
#
# So, we create the `self.data.build` with the minimum required data.
self.data.build = {
- 'id': self.data.build_pk,
+ "id": self.data.build_pk,
}
# Known errors in our application code (e.g. we couldn't connect to
@@ -488,6 +489,10 @@ def on_failure(self, exc, task_id, args, kwargs, einfo):
else:
message_id = BuildUserError.GENERIC
+ # Set build state as cancelled if the user cancelled the build
+ if isinstance(exc, BuildCancelled):
+ self.data.build["state"] = BUILD_STATE_CANCELLED
+
else:
# We don't know what happened in the build. Log the exception and
# report a generic notification to the user.
@@ -513,7 +518,7 @@ def on_failure(self, exc, task_id, args, kwargs, einfo):
if message_id not in self.exceptions_without_notifications:
self.send_notifications(
self.data.version_pk,
- self.data.build['id'],
+ self.data.build["id"],
event=WebHookEvent.BUILD_FAILED,
)
@@ -541,7 +546,7 @@ def on_failure(self, exc, task_id, args, kwargs, einfo):
send_external_build_status(
version_type=version_type,
- build_pk=self.data.build['id'],
+ build_pk=self.data.build["id"],
commit=self.data.build_commit,
status=status,
)
@@ -661,20 +666,20 @@ def on_success(self, retval, task_id, args, kwargs):
self.send_notifications(
self.data.version.pk,
- self.data.build['id'],
+ self.data.build["id"],
event=WebHookEvent.BUILD_PASSED,
)
if self.data.build_commit:
send_external_build_status(
version_type=self.data.version.type,
- build_pk=self.data.build['id'],
+ build_pk=self.data.build["id"],
commit=self.data.build_commit,
status=BUILD_STATUS_SUCCESS,
)
# Update build object
- self.data.build['success'] = True
+ self.data.build["success"] = True
def on_retry(self, exc, task_id, args, kwargs, einfo):
"""
@@ -686,11 +691,11 @@ def on_retry(self, exc, task_id, args, kwargs, einfo):
See https://docs.celeryproject.org/en/master/userguide/tasks.html#retrying
"""
- log.info('Retrying this task.')
+ log.info("Retrying this task.")
if isinstance(exc, BuildMaxConcurrencyError):
log.warning(
- 'Delaying tasks due to concurrency limit.',
+ "Delaying tasks due to concurrency limit.",
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
)
@@ -713,7 +718,7 @@ def after_return(self, status, retval, task_id, args, kwargs, einfo):
so some attributes from the `self.data` object may not be defined.
"""
# Update build object
- self.data.build['length'] = (timezone.now() - self.data.start_time).seconds
+ self.data.build["length"] = (timezone.now() - self.data.start_time).seconds
build_state = None
# The state key might not be defined
@@ -742,9 +747,9 @@ def after_return(self, status, retval, task_id, args, kwargs, einfo):
)
log.info(
- 'Build finished.',
- length=self.data.build['length'],
- success=self.data.build['success']
+ "Build finished.",
+ length=self.data.build["length"],
+ success=self.data.build["success"],
)
def update_build(self, state=None):
@@ -856,23 +861,20 @@ def get_build(self, build_pk):
if build_pk:
build = self.data.api_client.build(build_pk).get()
private_keys = [
- 'project',
- 'version',
- 'resource_uri',
- 'absolute_uri',
+ "project",
+ "version",
+ "resource_uri",
+ "absolute_uri",
]
# TODO: try to use the same technique than for ``APIProject``.
- return {
- key: val
- for key, val in build.items() if key not in private_keys
- }
+ return {key: val for key, val in build.items() if key not in private_keys}
# NOTE: this can be just updated on `self.data.build['']` and sent once the
# build has finished to reduce API calls.
def set_valid_clone(self):
"""Mark on the project that it has been cloned properly."""
self.data.api_client.project(self.data.project.pk).patch(
- {'has_valid_clone': True}
+ {"has_valid_clone": True}
)
self.data.project.has_valid_clone = True
self.data.version.project.has_valid_clone = True
@@ -887,7 +889,7 @@ def store_build_artifacts(self):
Remove build artifacts of types not included in this build (PDF, ePub, zip only).
"""
time_before_store_build_artifacts = timezone.now()
- log.info('Writing build artifacts to media storage')
+ log.info("Writing build artifacts to media storage")
self.update_build(state=BUILD_STATE_UPLOADING)
valid_artifacts = self.get_valid_artifact_types()
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -17,7 +17,7 @@
from readthedocs.config.config import BuildConfigV2
from readthedocs.config.exceptions import ConfigError
from readthedocs.config.tests.test_config import get_build_config
-from readthedocs.doc_builder.exceptions import BuildUserError
+from readthedocs.doc_builder.exceptions import BuildCancelled, BuildUserError
from readthedocs.projects.exceptions import RepositoryError
from readthedocs.projects.models import EnvironmentVariable, Project, WebHookEvent
from readthedocs.projects.tasks.builds import sync_repository_task, update_docs_task
@@ -695,6 +695,55 @@ def test_failed_build(
assert revoke_key_request._request.method == "POST"
assert revoke_key_request.path == "/api/v2/revoke/"
+ @mock.patch("readthedocs.projects.tasks.builds.send_external_build_status")
+ @mock.patch("readthedocs.projects.tasks.builds.UpdateDocsTask.execute")
+ def test_cancelled_build(
+ self,
+ execute,
+ send_external_build_status,
+ ):
+ # Force an exception from the execution of the task. We don't really
+ # care "where" it was raised: setup, build, syncing directories, etc
+ execute.side_effect = BuildCancelled(
+ message_id=BuildCancelled.CANCELLED_BY_USER
+ )
+
+ self._trigger_update_docs_task()
+
+ send_external_build_status.assert_called_once_with(
+ version_type=self.version.type,
+ build_pk=self.build.pk,
+ commit=self.build.commit,
+ status=BUILD_STATUS_FAILURE,
+ )
+
+ notification_request = self.requests_mock.request_history[-3]
+ assert notification_request._request.method == "POST"
+ assert notification_request.path == "/api/v2/notifications/"
+ assert notification_request.json() == {
+ "attached_to": f"build/{self.build.pk}",
+ "message_id": BuildCancelled.CANCELLED_BY_USER,
+ "state": "unread",
+ "dismissable": False,
+ "news": False,
+ "format_values": {},
+ }
+
+ # Test we are updating the DB by calling the API with the updated build object
+ # The second last one should be the PATCH for the build
+ build_status_request = self.requests_mock.request_history[-2]
+ assert build_status_request._request.method == "PATCH"
+ assert build_status_request.path == "/api/v2/build/1/"
+ assert build_status_request.json() == {
+ "builder": mock.ANY,
+ "commit": self.build.commit,
+ "error": "", # We are not sending ``error`` anymore
+ "id": self.build.pk,
+ "length": mock.ANY,
+ "state": "cancelled",
+ "success": False,
+ }
+
@mock.patch("readthedocs.doc_builder.director.load_yaml_config")
def test_build_commands_executed(
self,
| Build: missing state cancelled
It seems like we may have broken `Build.state = "cancelled"`; I'm seeing these builds with `Build.state = "finished"` now instead.
To reproduce:
- Open beta dashboard
- Start a build
- In the build detail page, click cancel
- Build state is now "failed"
- Inspect API response, build state is "failed"
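A quick way to inspect that API response (project slug, build id, and token are placeholders):

```python
# Placeholder slug/id/token; look at the "state" field in the output.
import requests

response = requests.get(
    "https://readthedocs.org/api/v3/projects/<project-slug>/builds/<build-id>/",
    headers={"Authorization": "Token <your-api-token>"},
    timeout=10,
)
print(response.json())  # the state field currently reads failed/finished instead of cancelled
```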


| 2024-02-29T10:49:17 |
|
readthedocs/readthedocs.org | 11,175 | readthedocs__readthedocs.org-11175 | [
"11097"
] | 268bd690bf2dd65a2a515ccf3fa734c6a205e885 | diff --git a/readthedocs/doc_builder/director.py b/readthedocs/doc_builder/director.py
--- a/readthedocs/doc_builder/director.py
+++ b/readthedocs/doc_builder/director.py
@@ -16,6 +16,8 @@
from django.utils.translation import gettext_lazy as _
from readthedocs.builds.constants import EXTERNAL
+from readthedocs.config.config import CONFIG_FILENAME_REGEX
+from readthedocs.config.find import find_one
from readthedocs.core.utils.filesystem import safe_open
from readthedocs.doc_builder.config import load_yaml_config
from readthedocs.doc_builder.exceptions import BuildUserError
@@ -108,20 +110,6 @@ def setup_vcs(self):
# self.run_build_job("pre_checkout")
self.checkout()
- # Output the path for the config file used.
- # This works as confirmation for us & the user about which file is used,
- # as well as the fact that *any* config file is used.
- if self.data.config.source_file:
- cwd = self.data.project.checkout_path(self.data.version.slug)
- command = self.vcs_environment.run(
- "cat",
- # Show user the relative path to the config file
- # TODO: Have our standard path replacement code catch this.
- # https://github.com/readthedocs/readthedocs.org/pull/10413#discussion_r1230765843
- self.data.config.source_file.replace(cwd + "/", ""),
- cwd=cwd,
- )
-
self.run_build_job("post_checkout")
commit = self.data.build_commit or self.vcs_repository.commit
@@ -240,6 +228,24 @@ def checkout(self):
if custom_config_file:
log.info("Using a custom .readthedocs.yaml file.", path=custom_config_file)
+
+ checkout_path = self.data.project.checkout_path(self.data.version.slug)
+ default_config_file = find_one(checkout_path, CONFIG_FILENAME_REGEX)
+ final_config_file = custom_config_file or default_config_file
+
+ # Output the path for the config file used.
+ # This works as confirmation for us & the user about which file is used,
+ # as well as the fact that *any* config file is used.
+ if final_config_file:
+ command = self.vcs_environment.run(
+ "cat",
+ # Show user the relative path to the config file
+ # TODO: Have our standard path replacement code catch this.
+ # https://github.com/readthedocs/readthedocs.org/pull/10413#discussion_r1230765843
+ final_config_file.replace(checkout_path + "/", ""),
+ cwd=checkout_path,
+ )
+
self.data.config = load_yaml_config(
version=self.data.version,
readthedocs_yaml_path=custom_config_file,
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -810,11 +810,14 @@ def test_build_commands_executed(
python_version = settings.RTD_DOCKER_BUILD_SETTINGS["tools"]["python"]["3"]
self.mocker.mocks["environment.run"].assert_has_calls(
[
- mock.call(
- "cat",
- "readthedocs.yml",
- cwd="/tmp/readthedocs-tests/git-repository",
- ),
+ # TODO: check for this in the VCS environment.
+ # We can't check it here because this is the build environment.
+ #
+ # mock.call(
+ # "cat",
+ # "readthedocs.yml",
+ # cwd="/tmp/readthedocs-tests/git-repository",
+ # ),
mock.call("asdf", "install", "python", python_version),
mock.call("asdf", "global", "python", python_version),
mock.call("asdf", "reshim", "python", record=False),
@@ -1348,7 +1351,10 @@ def test_conda_config_calls_conda_command(self, load_yaml_config):
]
self.mocker.mocks["environment.run"].assert_has_calls(
[
- mock.call("cat", "readthedocs.yml", cwd=mock.ANY),
+ # TODO: check for this in the VCS environment.
+ # We can't check it here because this is the build environment.
+ #
+ # mock.call("cat", "readthedocs.yml", cwd=mock.ANY),
mock.call("asdf", "install", "python", python_version),
mock.call("asdf", "global", "python", python_version),
mock.call("asdf", "reshim", "python", record=False),
@@ -1442,7 +1448,10 @@ def test_python_mamba_commands(self, load_yaml_config):
self.mocker.mocks["environment.run"].assert_has_calls(
[
- mock.call("cat", "readthedocs.yml", cwd=mock.ANY),
+ # TODO: check for this in the VCS environment.
+ # We can't check it here because this is the build environment.
+ #
+ # mock.call("cat", "readthedocs.yml", cwd=mock.ANY),
mock.call("asdf", "install", "python", "mambaforge-4.10.3-10"),
mock.call("asdf", "global", "python", "mambaforge-4.10.3-10"),
mock.call("asdf", "reshim", "python", record=False),
| Build: run `cat readthedocs.yaml` even when there is a config error
I just realized that we first validate the `readthedocs.yaml` file and then run `cat readthedocs.yaml` to show its content. As a result, if there is a syntax error, a missing required value, or anything else, we don't run the `cat` command. This makes it hard to debug and understand what's going on.
We should first run the `cat` command and after that validate the file. This will help a lot with UX.
Example: https://beta.readthedocs.org/projects/test-builds/builds/23364407/
| Related to
* https://github.com/readthedocs/readthedocs.org/issues/7005
* https://github.com/readthedocs/readthedocs.org/issues/4288
This issue would have been helpful in this case https://github.com/readthedocs/readthedocs.org/issues/11173, where the user had two YAML files in the repository and Read the Docs was picking the incorrect one.
I took a look at this and it seems we need to split the [`readthedocs.config.config.load()`](https://github.com/readthedocs/readthedocs.org/blob/03c03fa4de2a52fb5bbbbd8ab047e485abcc310c/readthedocs/config/config.py#L805-L850) function into two:
1. find the YAML file to load
2. load the YAML file and validate it
Moving out 1) will allow us to know which file we will be loading and show it to the user _before_ loading and validating it. | 2024-02-29T18:05:39 |
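A rough sketch of that split, reusing the helpers from the patch above (`find_one`, `CONFIG_FILENAME_REGEX`, `load_yaml_config`); the wrapper function and its exact signature are assumptions:

```python
# Sketch only: the wrapper and its arguments are assumptions; the helpers are
# the ones used in the patch above.
from readthedocs.config.config import CONFIG_FILENAME_REGEX
from readthedocs.config.find import find_one
from readthedocs.doc_builder.config import load_yaml_config


def show_then_load_config(environment, version, checkout_path):
    # Step 1: locate the YAML file and show it, even if it turns out to be invalid.
    config_file = find_one(checkout_path, CONFIG_FILENAME_REGEX)
    if config_file:
        relative_path = config_file.replace(checkout_path + "/", "")
        environment.run("cat", relative_path, cwd=checkout_path)

    # Step 2: only now parse and validate; any ConfigError is raised after the
    # user has already seen the file contents in the build output.
    return load_yaml_config(version=version, readthedocs_yaml_path=None)
```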
readthedocs/readthedocs.org | 11,198 | readthedocs__readthedocs.org-11198 | [
"10873"
] | 205809047927b46f0aeaa29b0d41fe6412438a35 | diff --git a/readthedocs/core/utils/filesystem.py b/readthedocs/core/utils/filesystem.py
--- a/readthedocs/core/utils/filesystem.py
+++ b/readthedocs/core/utils/filesystem.py
@@ -42,6 +42,7 @@ def assert_path_is_inside_docroot(path):
log.error(
"Suspicious operation outside the docroot directory.",
path_resolved=str(resolved_path),
+ docroot=settings.DOCROOT,
)
raise SuspiciousFileOperation(path)
diff --git a/readthedocs/projects/tasks/builds.py b/readthedocs/projects/tasks/builds.py
--- a/readthedocs/projects/tasks/builds.py
+++ b/readthedocs/projects/tasks/builds.py
@@ -5,10 +5,12 @@
rebuilding documentation.
"""
import os
+import shutil
import signal
import socket
import subprocess
from dataclasses import dataclass, field
+from pathlib import Path
import structlog
from celery import Task
@@ -40,6 +42,7 @@
from readthedocs.builds.utils import memcache_lock
from readthedocs.config.config import BuildConfigV2
from readthedocs.config.exceptions import ConfigError
+from readthedocs.core.utils.filesystem import assert_path_is_inside_docroot
from readthedocs.doc_builder.director import BuildDirector
from readthedocs.doc_builder.environments import (
DockerBuildEnvironment,
@@ -605,7 +608,8 @@ def get_valid_artifact_types(self):
# These output format does not support multiple files yet.
# In case multiple files are found, the upload for this format is not performed.
if artifact_type in ARTIFACT_TYPES_WITHOUT_MULTIPLE_FILES_SUPPORT:
- artifact_format_files = len(os.listdir(artifact_directory))
+ list_dir = os.listdir(artifact_directory)
+ artifact_format_files = len(list_dir)
if artifact_format_files > 1:
log.error(
"Multiple files are not supported for this format. "
@@ -626,6 +630,18 @@ def get_valid_artifact_types(self):
},
)
+ # Rename file as "<project_slug>-<version_slug>.<artifact_type>",
+ # which is the filename that Proxito serves for offline formats.
+ filename = list_dir[0]
+ _, extension = filename.rsplit(".")
+ path = Path(artifact_directory) / filename
+ destination = (
+ Path(artifact_directory) / f"{self.data.project.slug}.{extension}"
+ )
+ assert_path_is_inside_docroot(path)
+ assert_path_is_inside_docroot(destination)
+ shutil.move(path, destination)
+
# If all the conditions were met, the artifact is valid
valid_artifacts.append(artifact_type)
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -1,11 +1,14 @@
import os
import pathlib
import textwrap
+import uuid
+from pathlib import Path
from unittest import mock
import django_dynamic_fixture as fixture
import pytest
from django.conf import settings
+from django.test.utils import override_settings
from readthedocs.builds.constants import (
BUILD_STATUS_FAILURE,
@@ -260,6 +263,7 @@ def test_build_respects_formats_mkdocs(self, build_docs_class, load_yaml_config)
build_docs_class.assert_called_once_with("mkdocs") # HTML builder
+ @override_settings(DOCROOT="/tmp/readthedocs-tests/git-repository/")
@mock.patch("readthedocs.doc_builder.director.load_yaml_config")
def test_build_updates_documentation_type(self, load_yaml_config):
assert self.version.documentation_type == "sphinx"
@@ -403,6 +407,8 @@ def test_get_env_vars(self, load_yaml_config, build_environment, config, externa
expected_build_env_vars["PRIVATE_TOKEN"] = "a1b2c3"
assert build_env_vars == expected_build_env_vars
+ @override_settings(DOCROOT="/tmp/readthedocs-tests/git-repository/")
+ @mock.patch("readthedocs.projects.tasks.builds.shutil")
@mock.patch("readthedocs.projects.tasks.builds.index_build")
@mock.patch("readthedocs.projects.tasks.builds.build_complete")
@mock.patch("readthedocs.projects.tasks.builds.send_external_build_status")
@@ -417,6 +423,7 @@ def test_successful_build(
send_external_build_status,
build_complete,
index_build,
+ shutilmock,
):
load_yaml_config.return_value = get_build_config(
{
@@ -433,12 +440,16 @@ def test_successful_build(
# Create the artifact paths, so it's detected by the builder
os.makedirs(self.project.artifact_path(version=self.version.slug, type_="html"))
os.makedirs(self.project.artifact_path(version=self.version.slug, type_="json"))
+ filename = str(uuid.uuid4())
for f in ("htmlzip", "epub", "pdf"):
+ extension = "zip" if f == "htmlzip" else f
os.makedirs(self.project.artifact_path(version=self.version.slug, type_=f))
pathlib.Path(
os.path.join(
self.project.artifact_path(version=self.version.slug, type_=f),
- f"{self.project.slug}.{f}",
+ # Use a random name for the offline format.
+ # We will automatically rename this file to filename El Proxito expects.
+ f"{filename}.{extension}",
)
).touch()
@@ -452,6 +463,36 @@ def test_successful_build(
self._trigger_update_docs_task()
+ # Offline formats were renamed to the correct filename.
+ shutilmock.move.assert_has_calls(
+ [
+ mock.call(
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/htmlzip/{filename}.zip"
+ ),
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/htmlzip/{self.project.slug}.zip"
+ ),
+ ),
+ mock.call(
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/pdf/{filename}.pdf"
+ ),
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/pdf/{self.project.slug}.pdf"
+ ),
+ ),
+ mock.call(
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/epub/{filename}.epub"
+ ),
+ Path(
+ f"/tmp/readthedocs-tests/git-repository/_readthedocs/epub/{self.project.slug}.epub"
+ ),
+ ),
+ ]
+ )
+
# It has to be called twice, ``before_start`` and ``after_return``
clean_build.assert_has_calls(
[mock.call(mock.ANY), mock.call(mock.ANY)] # the argument is an APIVersion
| Build: don't care about the filename for the offline formats
Projects using `build.commands` to build their documentation can make use of the `$READTHEDOCS_OUTPUT` directory to expose offline formats like PDF, ePUB, and HTMLZip, as documented in https://docs.readthedocs.io/en/latest/build-customization.html
Currently, we support one and only one file per offline format. We have in-progress work at https://github.com/readthedocs/readthedocs.org/pull/10438, but the work required there is not trivial and it may stay off our roadmap/prioritization for some time. I think that feature _is important_ for big projects like the CPython documentation and we should work on it as soon as possible, tho.
In the meantime, we could make the build process a little simpler by allowing _any filename for the PDF_ saved under `$READTHEDOCS_OUTPUT/pdf/`, since right now it is required to be exactly `$READTHEDOCS_OUTPUT/pdf/$READTHEDOCS_PROJECT.pdf` --which is not documented anywhere 😄
I'd say that, as long as there is one and only one PDF file inside the output directory, we shouldn't care about the filename and rename it internally (probably at upload time) to be what we need it to be.
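A sketch of what that rename at upload time could look like; the helper name and its arguments are made up for illustration, and the real code lives in the references below:

```python
# Illustrative helper; name and signature are assumptions.
import os
import shutil
from pathlib import Path


def normalize_single_pdf(artifact_directory, project_slug):
    files = os.listdir(artifact_directory)
    if len(files) != 1:
        # Zero or multiple files: keep the current behavior (skip the upload).
        return None

    source = Path(artifact_directory) / files[0]
    destination = Path(artifact_directory) / f"{project_slug}.pdf"
    shutil.move(source, destination)
    return destination
```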
Code references:
* https://github.com/readthedocs/readthedocs.org/blob/90f17910c1486e002a900028ddf8bd85729eeb78/readthedocs/projects/tasks/builds.py#L605-L623
* https://github.com/readthedocs/readthedocs.org/blob/90f17910c1486e002a900028ddf8bd85729eeb78/readthedocs/projects/tasks/builds.py#L904-L935
| 2024-03-08T12:44:33 |
|
readthedocs/readthedocs.org | 11,213 | readthedocs__readthedocs.org-11213 | [
"11131"
] | a0526ff7959f2fceb4e69512312dff4f5d7f8096 | diff --git a/readthedocs/projects/tasks/builds.py b/readthedocs/projects/tasks/builds.py
--- a/readthedocs/projects/tasks/builds.py
+++ b/readthedocs/projects/tasks/builds.py
@@ -447,10 +447,14 @@ def before_start(self, task_id, args, kwargs):
self._reset_build()
def _reset_build(self):
- # Reset build only if it has some commands already.
- if self.data.build.get("commands"):
- log.info("Resetting build.")
- self.data.api_client.build(self.data.build["id"]).reset.post()
+ # Always reset the build before starting.
+ # We used to only reset it when it has at least one command executed already.
+ # However, with the introduction of the new notification system,
+ # it could have a notification attached (e.g. Max concurrency build)
+ # that needs to be removed from the build.
+ # See https://github.com/readthedocs/readthedocs.org/issues/11131
+ log.info("Resetting build.")
+ self.data.api_client.build(self.data.build["id"]).reset.post()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
| diff --git a/readthedocs/projects/tests/mockers.py b/readthedocs/projects/tests/mockers.py
--- a/readthedocs/projects/tests/mockers.py
+++ b/readthedocs/projects/tests/mockers.py
@@ -208,6 +208,11 @@ def _mock_api(self):
status_code=201,
)
+ self.requestsmock.post(
+ f"{settings.SLUMBER_API_HOST}/api/v2/build/{self.build.pk}/reset/",
+ status_code=201,
+ )
+
self.requestsmock.get(
f"{settings.SLUMBER_API_HOST}/api/v2/build/concurrent/?project__slug={self.project.slug}",
json=lambda request, context: {
diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -297,9 +297,9 @@ def test_build_updates_documentation_type(self, load_yaml_config):
self._trigger_update_docs_task()
# Update version state
- assert self.requests_mock.request_history[7]._request.method == "PATCH"
- assert self.requests_mock.request_history[7].path == "/api/v2/version/1/"
- assert self.requests_mock.request_history[7].json() == {
+ assert self.requests_mock.request_history[8]._request.method == "PATCH"
+ assert self.requests_mock.request_history[8].path == "/api/v2/version/1/"
+ assert self.requests_mock.request_history[8].json() == {
"addons": False,
"build_data": None,
"built": True,
@@ -477,8 +477,12 @@ def test_successful_build(
# TODO: assert the verb and the path for each API call as well
+ # Build reset
+ assert self.requests_mock.request_history[3]._request.method == "POST"
+ assert self.requests_mock.request_history[3].path == "/api/v2/build/1/reset/"
+
# Update build state: cloning
- assert self.requests_mock.request_history[3].json() == {
+ assert self.requests_mock.request_history[4].json() == {
"id": 1,
"state": "cloning",
"commit": "a1b2c3",
@@ -487,7 +491,7 @@ def test_successful_build(
}
# Update build state: installing
- assert self.requests_mock.request_history[4].json() == {
+ assert self.requests_mock.request_history[5].json() == {
"id": 1,
"state": "installing",
"commit": "a1b2c3",
@@ -550,7 +554,7 @@ def test_successful_build(
},
}
# Update build state: building
- assert self.requests_mock.request_history[5].json() == {
+ assert self.requests_mock.request_history[6].json() == {
"id": 1,
"state": "building",
"commit": "a1b2c3",
@@ -560,7 +564,7 @@ def test_successful_build(
"error": "",
}
# Update build state: uploading
- assert self.requests_mock.request_history[6].json() == {
+ assert self.requests_mock.request_history[7].json() == {
"id": 1,
"state": "uploading",
"commit": "a1b2c3",
@@ -570,9 +574,9 @@ def test_successful_build(
"error": "",
}
# Update version state
- assert self.requests_mock.request_history[7]._request.method == "PATCH"
- assert self.requests_mock.request_history[7].path == "/api/v2/version/1/"
- assert self.requests_mock.request_history[7].json() == {
+ assert self.requests_mock.request_history[8]._request.method == "PATCH"
+ assert self.requests_mock.request_history[8].path == "/api/v2/version/1/"
+ assert self.requests_mock.request_history[8].json() == {
"addons": False,
"build_data": None,
"built": True,
@@ -582,11 +586,11 @@ def test_successful_build(
"has_htmlzip": True,
}
# Set project has valid clone
- assert self.requests_mock.request_history[8]._request.method == "PATCH"
- assert self.requests_mock.request_history[8].path == "/api/v2/project/1/"
- assert self.requests_mock.request_history[8].json() == {"has_valid_clone": True}
+ assert self.requests_mock.request_history[9]._request.method == "PATCH"
+ assert self.requests_mock.request_history[9].path == "/api/v2/project/1/"
+ assert self.requests_mock.request_history[9].json() == {"has_valid_clone": True}
# Update build state: finished, success and builder
- assert self.requests_mock.request_history[9].json() == {
+ assert self.requests_mock.request_history[10].json() == {
"id": 1,
"state": "finished",
"commit": "a1b2c3",
@@ -598,8 +602,8 @@ def test_successful_build(
"error": "",
}
- assert self.requests_mock.request_history[10]._request.method == "POST"
- assert self.requests_mock.request_history[10].path == "/api/v2/revoke/"
+ assert self.requests_mock.request_history[11]._request.method == "POST"
+ assert self.requests_mock.request_history[11].path == "/api/v2/revoke/"
assert BuildData.objects.all().exists()
| Notifications: duplicate messages
Noticed here:
https://beta.readthedocs.org/projects/xclim/builds/23494705/
The error messages seem to stack here:

Also not sure why the string interpolation fails for these, I've noticed that in a few spots though.
On the current dashboard there are no notifications though, not sure why.
https://readthedocs.org/projects/xclim/builds/23494705/
| cc @humitos I don't know if we've fixed this yet or not
Without jumping too much on this yet:
1. for some reason the build is not being reset properly when it's retried. We have code that cleans up all the notifications: https://github.com/readthedocs/readthedocs.org/blob/0a381a7acfc7683755ad0af3ad652c3b2f73fe25/readthedocs/builds/models.py#L1092-L1109
2. since the notification is exactly the same, it should be de-duplicated when it's added. However, when checking the APIv2 view (the one used by the builders), it doesn't seem like we are using our custom `.add()` method that de-duplicates notifications: https://github.com/readthedocs/readthedocs.org/blob/0a381a7acfc7683755ad0af3ad652c3b2f73fe25/readthedocs/api/v2/views/model_views.py#L382-L399
So, working on 2) to call `.add()` should be the first point to attack here. I suppose we should overwrite the `api.v2.NotificationSerializer.save()` method to call our custom `.add()`.
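Very roughly, something like this — the `Notification.objects.add()` helper and its keyword signature are assumptions based on the linked code, and the field list is only illustrative:

```python
# Sketch only: the manager helper and the field list are assumptions.
from rest_framework import serializers

from readthedocs.notifications.models import Notification


class NotificationSerializer(serializers.ModelSerializer):
    class Meta:
        model = Notification
        fields = ["message_id", "state", "dismissable", "news", "format_values"]

    def create(self, validated_data):
        # Route creation through the de-duplicating helper instead of
        # Model.objects.create(), so posting the same message twice just
        # refreshes the existing row.
        return Notification.objects.add(**validated_data)
```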
> On the current dashboard there are no notifications though, not sure why.
> [readthedocs.org/projects/xclim/builds/23494705](https://readthedocs.org/projects/xclim/builds/23494705/)
I think this is because we implemented permissions on notifications in the last deploy (see https://github.com/readthedocs/readthedocs.org/pull/11117) and there may be a bug on it because those notifications should be shown to all the users, since the build is public.
> Also not sure why the string interpolation fails for these, I've noticed that in a few spots though.
```python
In [12]: Notification.objects.filter(message_id=BuildMaxConcurrencyError.LIMIT_REACHED, format_values={}).count()
Out[12]: 4595
In [13]: Notification.objects.filter(message_id=BuildMaxConcurrencyError.LIMIT_REACHED).exclude(format_values={}).count()
Out[13]: 43746
```
It seems that somewhere we are creating this notification without the proper `format_values`; 10% of them are broken. I suppose there is a Celery workflow that we are not handling correctly here. I doubt it's this one in particular https://github.com/readthedocs/readthedocs.org/blob/0a381a7acfc7683755ad0af3ad652c3b2f73fe25/readthedocs/projects/tasks/builds.py#L362-L374
This is probably worth a separate issue, but I've noticed these build concurrency notifications on a number of builds too. Shouldn't these notifications clear after the build is successfully started?

I've noticed these in a few spots, but it's hard for me to tell if this should be resolved now or not. The above error is on my local instance on a finished build -- same with the original errors I posted here.
I suppose also these should be warning level notifications? That would at least help make it clear that the build is not failed.
> I suppose also these should be warning level notifications? That would at least help make it clear that the build is not failed.
I've done this in https://github.com/readthedocs/readthedocs.org/pull/11196
> Shouldn't these notifications clear after the build is successfully started?
Yes, I've mentioned this in 1) from my previous comment https://github.com/readthedocs/readthedocs.org/issues/11131#issuecomment-1956118651
> So, working on 2) to call `.add()` should be the first point to attack here. I suppose we should overwrite the `api.v2.NotificationSerializer.save()` method to call our custom `.add()`.
This is done in https://github.com/readthedocs/readthedocs.org/pull/11197
Now, with the two PRs I've opened, I think this issue shouldn't be an issue anymore. However, 1) could still be present sometimes due to network issues/web instance congestion or similar scenarios where the `reset` API call fails. That said, I would move forward with what we have, keeping the issue open for now in case we continue seeing it, but I wouldn't invest too much time on digging further into debugging for now.
> Yes, I've mentioned this in 1) from my previous comment https://github.com/readthedocs/readthedocs.org/issues/11131#issuecomment-1956118651
That comment implied to me that the messages would only be _deduplicated_ with the fix -- ie:
> since the notification is exactly the same, it should be de-duplicated when it's added.
Are you saying that this fix is also what will dismiss the notification once the build has started?
To clarify, what I would expect here is that while the build is still queued that warning message should show, but once the build is no longer queued due to concurrency limits the message will disappear.
> what I would expect here is that while the build is still queued that warning message should show, but once the build is no longer queued due to concurrency limits the message will disappear.
Yes, this is how it should work.
However, even with all these fixes... the scenario I mentioned before, may be still present on some edge cases 👇🏼
>> However, 1) could be still present sometimes due to network issues/web instance congestion or similar scenarios where the `reset` API call fails to perform.
I found this issue in https://readthedocs.org/projects/docs/builds/23722719/ as well. This was a recent build in our own documentation. I still don't understand why this is happening, but it's worth investigating it a little more.
We should probably run a small Python code from Django shell to remove these notifications from builds that are in finished state. | 2024-03-13T12:51:26 |
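A minimal sketch of that one-off cleanup, meant to be pasted into a Django shell — the import paths and the `attached_to` relation are assumptions here; only the `message_id` filter is taken from the queries above:

```python
from readthedocs.doc_builder.exceptions import BuildMaxConcurrencyError
from readthedocs.notifications.models import Notification

# Hypothetical cleanup: drop concurrency-limit notifications whose attached
# build already finished, keeping the ones for builds still in the queue.
for notification in Notification.objects.filter(
    message_id=BuildMaxConcurrencyError.LIMIT_REACHED
).iterator():
    build = notification.attached_to  # assumed generic relation to the Build
    if build is not None and getattr(build, "state", None) == "finished":
        notification.delete()
```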
readthedocs/readthedocs.org | 11,234 | readthedocs__readthedocs.org-11234 | [
"11214"
] | 5315a7eef4e30890c1ef9c27b4eec94d79829857 | diff --git a/readthedocs/api/v3/filters.py b/readthedocs/api/v3/filters.py
--- a/readthedocs/api/v3/filters.py
+++ b/readthedocs/api/v3/filters.py
@@ -62,9 +62,9 @@ def get_running(self, queryset, name, value):
class NotificationFilter(filters.FilterSet):
class Meta:
model = Notification
- fields = [
- "state",
- ]
+ fields = {
+ "state": ["in", "exact"],
+ }
class RemoteRepositoryFilter(filters.FilterSet):
| API: notification filtering
It sounds like we'll need more complex filtering on the API v3 notifications endpoints to support returning more than one value of `Notification.state`. For example, it sounds like in most cases we'll need to use `state__in=read,unread`.
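For illustration, a request using the `in` lookup could look like this — the endpoint path and authentication below are placeholders, only the `state__in` query parameter is the point:

```python
import requests

# Hypothetical example: adjust the URL to the actual notifications endpoint.
response = requests.get(
    "https://readthedocs.org/api/v3/notifications/",
    params={"state__in": "read,unread"},
    headers={"Authorization": "Token <your-token>"},
)
print(response.json())
```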
| 2024-03-21T11:39:48 |
||
readthedocs/readthedocs.org | 11,278 | readthedocs__readthedocs.org-11278 | [
"11207"
] | 23330ffe0ba208501d07c63a0df10b18e780ae11 | diff --git a/readthedocs/projects/version_handling.py b/readthedocs/projects/version_handling.py
--- a/readthedocs/projects/version_handling.py
+++ b/readthedocs/projects/version_handling.py
@@ -169,23 +169,27 @@ def determine_stable_version(version_list):
return None
-def sort_versions_python_packaging(version_list, latest_stable_at_beginning):
+def sort_versions_generic(
+ version_list,
+ exception,
+ parse_version,
+ latest_stable_at_beginning,
+ raw_pattern=None,
+):
"""
- Sort Read the Docs versions list using ``packaging`` algorithm.
+ Sort Read the Docs versions based on ``parse_version`` function.
- All the invalid version (raise ``InvalidVersion``) are added at the end
- sorted alphabetically.
+ ``parse_version`` function is called with ``slug`` and ``raw_pattern`` as arguments to decide how to sort them.
- https://pypi.org/project/packaging/
- https://packaging.python.org/en/latest/specifications/version-specifiers/
+ All versions that raise ``exception`` are added at the end sorted alphabetically.
"""
+
alphabetically_sorted_version_list = sorted(
version_list,
key=operator.attrgetter("slug"),
)
initial_versions = []
-
valid_versions = []
invalid_versions = []
for i, version in enumerate(alphabetically_sorted_version_list):
@@ -196,8 +200,16 @@ def sort_versions_python_packaging(version_list, latest_stable_at_beginning):
continue
try:
- valid_versions.append((version, Version(version.slug)))
- except InvalidVersion:
+ valid_versions.append(
+ (
+ version,
+ parse_version(
+ slug=version.slug,
+ raw_pattern=raw_pattern,
+ ),
+ )
+ )
+ except exception:
# When the version is invalid, we put it at the end while keeping
# the alphabetically sorting between the invalid ones.
invalid_versions.append((version, None))
@@ -207,19 +219,28 @@ def sort_versions_python_packaging(version_list, latest_stable_at_beginning):
+ sorted(valid_versions, key=operator.itemgetter(1), reverse=True)
+ invalid_versions
)
-
return [item[0] for item in all_versions if item[0] is not None]
-def sort_versions_calver(version_list, latest_stable_at_beginning):
+def sort_versions_python_packaging(version_list, latest_stable_at_beginning):
"""
- Sort Read the Docs versions using CalVer pattern: ``YYYY.0M.0M``.
+ Sort Read the Docs versions list using ``packaging`` algorithm.
- All the invalid version are added at the end sorted alphabetically.
+ All the invalid version (raise ``InvalidVersion``) are added at the end
+ sorted alphabetically.
+
+ https://pypi.org/project/packaging/
+ https://packaging.python.org/en/latest/specifications/version-specifiers/
"""
- raw_pattern = "YYYY.0M.0D"
- return sort_versions_custom_pattern(
- version_list, raw_pattern, latest_stable_at_beginning
+
+ def parse_version(*args, slug=None, **kwargs):
+ return Version(slug)
+
+ return sort_versions_generic(
+ version_list,
+ InvalidVersion,
+ parse_version,
+ latest_stable_at_beginning,
)
@@ -233,40 +254,28 @@ def sort_versions_custom_pattern(version_list, raw_pattern, latest_stable_at_beg
It uses ``Bumpver`` behinds the scenes for the parsing and sorting.
https://github.com/mbarkhau/bumpver
"""
- alphabetically_sorted_version_list = sorted(
+
+ def parse_version(*args, slug=None, raw_pattern=None, **kwargs):
+ return parse_version_info(slug, raw_pattern=raw_pattern)
+
+ return sort_versions_generic(
version_list,
- key=operator.attrgetter("slug"),
+ PatternError,
+ parse_version,
+ latest_stable_at_beginning,
+ raw_pattern,
)
- initial_versions = []
- valid_versions = []
- invalid_versions = []
- for i, version in enumerate(alphabetically_sorted_version_list):
- if latest_stable_at_beginning:
- if version.slug in (STABLE, LATEST):
- # It relies on the version list sorted alphabetically first ("l" comes first than "s")
- initial_versions.append((version, version.slug))
- continue
- try:
- valid_versions.append(
- (
- version,
- parse_version_info(
- version.slug,
- raw_pattern=raw_pattern,
- ),
- )
- )
- except PatternError:
- # When the version is invalid, we put it at the end while keeping
- # the alphabetically sorting between the invalid ones.
- invalid_versions.append((version, None))
+def sort_versions_calver(version_list, latest_stable_at_beginning):
+ """
+ Sort Read the Docs versions using CalVer pattern: ``YYYY.0M.0M``.
- all_versions = (
- initial_versions
- + sorted(valid_versions, key=operator.itemgetter(1), reverse=True)
- + invalid_versions
+ All the invalid version are added at the end sorted alphabetically.
+ """
+ raw_pattern = "YYYY.0M.0D"
+ return sort_versions_custom_pattern(
+ version_list,
+ raw_pattern,
+ latest_stable_at_beginning,
)
-
- return [item[0] for item in all_versions if item[0] is not None]
| Addons: refactor flyout sorting
The `sort_versions_python_packaging` and `sort_versions_custom_pattern` functions share most of their code. The only part that differs is what's inside the try/except. Here is the code involved:
https://github.com/readthedocs/readthedocs.org/blob/f0c74a644cc1e18c04ae11a31c9fbef01b8ca073/readthedocs/projects/version_handling.py#L226-L272
Reference https://github.com/readthedocs/readthedocs.org/pull/11069#discussion_r1520128348
| I jumped into this issue and I'm still not sure what's the best way to refactor this code. Basically, the only part we need to change is the `try/except` clause. All the rest is identical.
On that `try/except` clause the different parts are:
- the exception that needs to be handled
- the second argument required to generate the tuple
Would it be enough to isolate all this code in a function that receives `exception=` and `parse_version=`? 🤔 | 2024-04-15T10:56:06 |
|
readthedocs/readthedocs.org | 11,327 | readthedocs__readthedocs.org-11327 | [
"11309"
] | a6130d37597b6ceabb462055822ab25964044334 | diff --git a/readthedocs/oauth/services/gitlab.py b/readthedocs/oauth/services/gitlab.py
--- a/readthedocs/oauth/services/gitlab.py
+++ b/readthedocs/oauth/services/gitlab.py
@@ -35,7 +35,7 @@ class GitLabService(Service):
# Just use the network location to determine if it's a GitLab project
# because private repos have another base url, eg. [email protected]
url_pattern = re.compile(
- re.escape(urlparse(adapter.provider_base_url).netloc),
+ re.escape(urlparse(adapter.provider_default_url).netloc),
)
PERMISSION_NO_ACCESS = 0
@@ -73,7 +73,7 @@ def sync_repositories(self):
remote_repositories = []
try:
repos = self.paginate(
- "{url}/api/v4/projects".format(url=self.adapter.provider_base_url),
+ "{url}/api/v4/projects".format(url=self.adapter.provider_default_url),
per_page=100,
archived=False,
order_by="path",
@@ -100,7 +100,7 @@ def sync_organizations(self):
try:
orgs = self.paginate(
- "{url}/api/v4/groups".format(url=self.adapter.provider_base_url),
+ "{url}/api/v4/groups".format(url=self.adapter.provider_default_url),
per_page=100,
all_available=False,
order_by="path",
@@ -110,7 +110,7 @@ def sync_organizations(self):
remote_organization = self.create_organization(org)
org_repos = self.paginate(
"{url}/api/v4/groups/{id}/projects".format(
- url=self.adapter.provider_base_url,
+ url=self.adapter.provider_default_url,
id=org["id"],
),
per_page=100,
@@ -131,7 +131,7 @@ def sync_organizations(self):
# which contains the admin permission fields.
resp = self.get_session().get(
"{url}/api/v4/projects/{id}".format(
- url=self.adapter.provider_base_url, id=repo["id"]
+ url=self.adapter.provider_default_url, id=repo["id"]
)
)
@@ -270,7 +270,7 @@ def create_organization(self, fields):
organization.name = fields.get("name")
organization.slug = fields.get("path")
organization.url = "{url}/{path}".format(
- url=self.adapter.provider_base_url,
+ url=self.adapter.provider_default_url,
path=fields.get("path"),
)
organization.avatar_url = fields.get("avatar_url")
@@ -336,7 +336,7 @@ def get_provider_data(self, project, integration):
try:
resp = session.get(
"{url}/api/v4/projects/{repo_id}/hooks".format(
- url=self.adapter.provider_base_url,
+ url=self.adapter.provider_default_url,
repo_id=repo_id,
),
)
@@ -383,7 +383,7 @@ def setup_webhook(self, project, integration=None):
)
repo_id = self._get_repo_id(project)
- url = f"{self.adapter.provider_base_url}/api/v4/projects/{repo_id}/hooks"
+ url = f"{self.adapter.provider_default_url}/api/v4/projects/{repo_id}/hooks"
if repo_id is None:
return (False, resp)
@@ -462,7 +462,7 @@ def update_webhook(self, project, integration):
hook_id = provider_data.get("id")
resp = session.put(
"{url}/api/v4/projects/{repo_id}/hooks/{hook_id}".format(
- url=self.adapter.provider_base_url,
+ url=self.adapter.provider_default_url,
repo_id=repo_id,
hook_id=hook_id,
),
@@ -537,7 +537,7 @@ def send_build_status(self, build, commit, status):
"description": description,
"context": context,
}
- url = f"{self.adapter.provider_base_url}/api/v4/projects/{repo_id}/statuses/{commit}"
+ url = f"{self.adapter.provider_default_url}/api/v4/projects/{repo_id}/statuses/{commit}"
log.bind(
project_slug=project.slug,
diff --git a/readthedocs/settings/base.py b/readthedocs/settings/base.py
--- a/readthedocs/settings/base.py
+++ b/readthedocs/settings/base.py
@@ -285,7 +285,6 @@ def INSTALLED_APPS(self): # noqa
"allauth.socialaccount",
"allauth.socialaccount.providers.github",
"allauth.socialaccount.providers.gitlab",
- "allauth.socialaccount.providers.bitbucket",
"allauth.socialaccount.providers.bitbucket_oauth2",
"cacheops",
]
@@ -698,12 +697,6 @@ def DOCKER_LIMITS(self):
],
# Bitbucket scope/permissions are determined by the Oauth consumer setup on bitbucket.org.
},
- # Deprecated, we use `bitbucket_oauth2` for all new connections.
- "bitbucket": {
- "APPS": [
- {"client_id": "123", "secret": "456", "key": ""},
- ],
- },
}
@property
| diff --git a/readthedocs/rtd_tests/tests/test_celery.py b/readthedocs/rtd_tests/tests/test_celery.py
--- a/readthedocs/rtd_tests/tests/test_celery.py
+++ b/readthedocs/rtd_tests/tests/test_celery.py
@@ -149,7 +149,7 @@ def test_send_build_status_no_remote_repo_or_social_account_github(
notification.format_values,
{
"provider_name": "GitHub",
- "url_connect_account": "/accounts/social/connections/",
+ "url_connect_account": "/accounts/3rdparty/",
},
)
@@ -222,6 +222,6 @@ def test_send_build_status_no_remote_repo_or_social_account_gitlab(
notification.format_values,
{
"provider_name": "GitLab",
- "url_connect_account": "/accounts/social/connections/",
+ "url_connect_account": "/accounts/3rdparty/",
},
)
diff --git a/requirements/testing.in b/requirements/testing.in
--- a/requirements/testing.in
+++ b/requirements/testing.in
@@ -2,7 +2,7 @@
-r pip.txt
-sphinx
+sphinx[test]
django-dynamic-fixture
pytest
pytest-custom-exit-code
diff --git a/requirements/testing.txt b/requirements/testing.txt
--- a/requirements/testing.txt
+++ b/requirements/testing.txt
@@ -91,10 +91,10 @@ cssselect==1.2.0
# via
# -r requirements/pip.txt
# pyquery
+cython==3.0.10
+ # via sphinx
defusedxml==0.7.1
- # via
- # -r requirements/pip.txt
- # python3-openid
+ # via sphinx
distlib==0.3.8
# via
# -r requirements/pip.txt
@@ -125,7 +125,7 @@ django==4.2.13
# django-timezone-field
# djangorestframework
# jsonfield
-django-allauth[saml]==0.57.2
+django-allauth[saml,socialaccount]==0.63.1
# via -r requirements/pip.txt
django-annoying==0.10.6
# via -r requirements/pip.txt
@@ -317,6 +317,7 @@ pytest==8.2.0
# pytest-custom-exit-code
# pytest-django
# pytest-mock
+ # sphinx
pytest-cov==5.0.0
# via -r requirements/testing.in
pytest-custom-exit-code==0.3.0
@@ -335,10 +336,6 @@ python-dateutil==2.9.0.post0
# botocore
# elasticsearch-dsl
# python-crontab
-python3-openid==3.2.0
- # via
- # -r requirements/pip.txt
- # django-allauth
python3-saml==1.16.0
# via
# -r requirements/pip.txt
@@ -396,7 +393,7 @@ slumber==0.7.1
# via -r requirements/pip.txt
snowballstemmer==2.2.0
# via sphinx
-sphinx==7.3.7
+sphinx[test]==7.3.7
# via -r requirements/testing.in
sphinxcontrib-applehelp==1.0.8
# via sphinx
@@ -483,3 +480,6 @@ xmlsec==1.3.13
# python3-saml
yamale==2.2.0
# via -r requirements/testing.in
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
| Allauth: update to use latest version
## What's the problem this feature will solve?
The latest versions of allauth include some improvements and fixes related to SAML, which is a feature we are actively developing for .com.
Last time I checked, there were some breaking changes related to templates, and changes around using JSON fields.
## Additional context
The last time we updated allauth (https://github.com/readthedocs/readthedocs.org/pull/11134) we hit only one problem, I think; it was related to a missing changelog entry about a field change in Google. Another problem we had was about the settings, but that wasn't related to allauth itself, rather to our settings inheritance.
| > latest versions of allauth include some improvements and fixes related to SAML
What are those improvements and fixes? Knowing that will help us prioritize this upgrade.
You can just check the changelog and look for "SAML" https://docs.allauth.org/en/latest/release-notes/recent.html Some important ones are:
- SAML: accessing the SLS/ACS views using a GET request would result in a crash (500).
- SAML: the login view did not obey the SOCIALACCOUNT_LOGIN_ON_GET = False setting. | 2024-05-15T22:02:12 |
readthedocs/readthedocs.org | 11,354 | readthedocs__readthedocs.org-11354 | [
"11231"
] | a127e9900f1853818021e290a70b9d78be23f150 | diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py
--- a/readthedocs/projects/filters.py
+++ b/readthedocs/projects/filters.py
@@ -234,7 +234,7 @@ def get_version_queryset(self):
# This query is passed in at instantiation
return self.queryset
- def get_visibility(self, queryset, *, value):
+ def get_visibility(self, queryset, field_name, value):
if value == self.VISIBILITY_HIDDEN:
return queryset.filter(hidden=True)
if value == self.VISIBILITY_VISIBLE:
| Project version filter returns 500
It seems that `ProjectVersionListFilterSet.get_visibility()` is receiving the wrong number of arguments.
To reproduce, just hit https://beta.readthedocs.org/projects/bigo-live-hack/?privacy=&sort=&visibility=hidden
Sentry issue: https://read-the-docs.sentry.io/issues/4721614191/?project=148442&query=is%3Aunresolved&referrer=issue-stream&statsPeriod=7d&stream_index=7
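For context, a minimal sketch of how django-filter calls a `method`-based filter — this is only meant to show why a keyword-only `value` parameter breaks; the real FilterSet is the one patched above:

```python
import django_filters as filters


class ExampleVersionFilterSet(filters.FilterSet):
    # Illustrative filter declaration, not the actual one from readthedocs/projects/filters.py.
    visibility = filters.ChoiceFilter(
        method="get_visibility",
        choices=[("hidden", "hidden"), ("visible", "visible")],
    )

    # django-filter invokes the method positionally as (queryset, field_name, value),
    # so declaring ``value`` as keyword-only makes that call fail with the
    # wrong-number-of-arguments error reported above.
    def get_visibility(self, queryset, field_name, value):
        return queryset
```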
| Moving this to the application; it's a bug with the view/filter.
I can take this on, I'm already working in these filters. | 2024-05-23T22:11:47 |
|
readthedocs/readthedocs.org | 11,361 | readthedocs__readthedocs.org-11361 | [
"11292"
] | b867beef6c30003a8f78fc7d2f81e2ce1afeb6b1 | diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -348,7 +348,15 @@ def _v1(self, project, version, build, filename, url, user):
)
main_project = project.main_language_project or project
- project_translations = main_project.translations.all().order_by("language")
+
+ # Exclude the current project since we don't want to return itself as a translation
+ project_translations = main_project.translations.all().exclude(
+ slug=project.slug
+ )
+ # Include main project as translation if the current project is one of the translations
+ if project != main_project:
+ project_translations |= Project.objects.filter(slug=main_project.slug)
+ project_translations = project_translations.order_by("language")
data = {
"api_version": "1",
| diff --git a/readthedocs/proxito/tests/test_hosting.py b/readthedocs/proxito/tests/test_hosting.py
--- a/readthedocs/proxito/tests/test_hosting.py
+++ b/readthedocs/proxito/tests/test_hosting.py
@@ -300,6 +300,7 @@ def test_flyout_translations(self):
)
assert r.status_code == 200
+ # Hitting the English version of the docs, will return Japanese as translation
assert len(r.json()["projects"]["translations"]) == 1
assert r.json()["projects"]["translations"][0]["slug"] == "translation"
assert r.json()["projects"]["translations"][0]["language"]["code"] == "ja"
@@ -308,6 +309,28 @@ def test_flyout_translations(self):
== "https://project.dev.readthedocs.io/ja/latest/"
)
+ # Hitting the Japanese version of the docs, will return English as translation
+ r = self.client.get(
+ reverse("proxito_readthedocs_docs_addons"),
+ {
+ "url": "https://project.dev.readthedocs.io/ja/latest/",
+ "client-version": "0.6.0",
+ "api-version": "1.0.0",
+ },
+ secure=True,
+ headers={
+ "host": "project.dev.readthedocs.io",
+ },
+ )
+ assert r.status_code == 200
+ assert len(r.json()["projects"]["translations"]) == 1
+ assert r.json()["projects"]["translations"][0]["slug"] == "project"
+ assert r.json()["projects"]["translations"][0]["language"]["code"] == "en"
+ assert (
+ r.json()["projects"]["translations"][0]["urls"]["documentation"]
+ == "https://project.dev.readthedocs.io/en/latest/"
+ )
+
def test_flyout_downloads(self):
fixture.get(
Version,
| Addons: don't return current translation and include source language in `projects.translations` field
Currently, `project.translations` is returning all the translations from `main_project`. I realized that we don't always want this. This is only useful when browsing the _source_ language of the documentation. However, when reading any of its translations, `project.translations` **should not show** the current translation (the one the user is reading) and **should show** the _source_ language as a translation as well.
### Example
If the project has English (source language), Spanish and Japanese translations, `projects.translations` should contain:
- Spanish and Japanese, when hitting the English translation
- English and Japanese, when hitting the Spanish translation
### How to reproduce it
1. Go to https://cpython-previews.readthedocs.io/es/latest/
2. Open the flyout
3. You will see that `es` is duplicated

| I think something like this could work:
```diff
diff --git a/readthedocs/proxito/views/hosting.py b/readthedocs/proxito/views/hosting.py
index 4b598e21b..780e09091 100644
--- a/readthedocs/proxito/views/hosting.py
+++ b/readthedocs/proxito/views/hosting.py
@@ -342,7 +342,12 @@ class AddonsResponse:
)
main_project = project.main_language_project or project
- project_translations = main_project.translations.all().order_by("language")
+ # Exclude the current project since we don't want to return itself as a translation
+ project_translations = main_project.translations.all().exclude(slug=project.slug).order_by("language")
+
+ # Include main project as translation if the current project is one of the translations
+ if project != main_project:
+ project_translations |= Project.objects.filter(slug=main_project.slug)
data = {
"api_version": "1",
```
I tested the previous diff locally and it works fine 👍🏼 . I need to open a PR and write some tests. | 2024-05-28T15:33:06 |
readthedocs/readthedocs.org | 11,386 | readthedocs__readthedocs.org-11386 | [
"11350"
] | 88d7c6e2bf4cc472e9cb349a7f4340160a55e4d6 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -32,24 +32,26 @@
"2.7": "2.7.18",
"3.6": "3.6.15",
"3.7": "3.7.17",
- "3.8": "3.8.18",
- "3.9": "3.9.18",
- "3.10": "3.10.13",
- "3.11": "3.11.6",
- "3.12": "3.12.0",
+ "3.8": "3.8.19",
+ "3.9": "3.9.19",
+ "3.10": "3.10.14",
+ "3.11": "3.11.9",
+ "3.12": "3.12.3",
"miniconda3-4.7": "miniconda3-4.7.12",
+ "miniconda3-3.12-24.1": "miniconda3-3.12-24.1.2-0",
"mambaforge-4.10": "mambaforge-4.10.3-10",
"mambaforge-22.9": "mambaforge-22.9.0-3",
+ "mambaforge-23.11": "mambaforge-23.11.0-0",
},
"nodejs": {
"14": "14.20.1",
"16": "16.18.1",
"18": "18.16.1",
"19": "19.0.1",
- "20": "20.11.0", # LTS
+ "20": "20.14.0", # LTS
},
"ruby": {
- "3.3": "3.3.0",
+ "3.3": "3.3.2",
},
"rust": {
"1.55": "1.55.0",
@@ -57,13 +59,15 @@
"1.64": "1.64.0",
"1.70": "1.70.0",
"1.75": "1.75.0",
+ "1.78": "1.78.0",
},
"golang": {
"1.17": "1.17.13",
"1.18": "1.18.10",
- "1.19": "1.19.10",
- "1.20": "1.20.5",
- "1.21": "1.21.6",
+ "1.19": "1.19.13",
+ "1.20": "1.20.14",
+ "1.21": "1.21.11",
+ "1.22": "1.22.4",
},
},
}
@@ -74,9 +78,9 @@
_OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
_TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
_TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
-_TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-4.7"]
-_TOOLS["python"]["mambaforge-latest"] = _TOOLS["python"]["mambaforge-22.9"]
+_TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
+_TOOLS["python"]["mambaforge-latest"] = _TOOLS["python"]["mambaforge-23.11"]
_TOOLS["nodejs"]["latest"] = _TOOLS["nodejs"]["20"]
_TOOLS["ruby"]["latest"] = _TOOLS["ruby"]["3.3"]
-_TOOLS["rust"]["latest"] = _TOOLS["rust"]["1.75"]
-_TOOLS["golang"]["latest"] = _TOOLS["golang"]["1.21"]
+_TOOLS["rust"]["latest"] = _TOOLS["rust"]["1.78"]
+_TOOLS["golang"]["latest"] = _TOOLS["golang"]["1.22"]
| diff --git a/readthedocs/projects/tests/test_build_tasks.py b/readthedocs/projects/tests/test_build_tasks.py
--- a/readthedocs/projects/tests/test_build_tasks.py
+++ b/readthedocs/projects/tests/test_build_tasks.py
@@ -524,7 +524,7 @@ def test_successful_build(
},
"tools": {
"python": {
- "full_version": "3.12.0",
+ "full_version": "3.12.3",
"version": "3",
}
},
diff --git a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
--- a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
+++ b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
@@ -153,9 +153,11 @@
"3.12",
"latest",
"miniconda3-4.7",
+ "miniconda3-3.12-24.1",
"miniconda-latest",
"mambaforge-4.10",
"mambaforge-22.9",
+ "mambaforge-23.11",
"mambaforge-latest"
]
},
@@ -182,6 +184,7 @@
"1.64",
"1.70",
"1.75",
+ "1.78",
"latest"
]
},
@@ -192,6 +195,7 @@
"1.19",
"1.20",
"1.21",
+ "1.22",
"latest"
]
}
| Build tools: update `latest` versions
## What's the problem this feature will solve?
The version of miniconda we have (4.7) is quite old. I remember checking on this some time ago and thinking that there weren't newer versions available (5.x, for example). It turns out they changed their versioning scheme a while back... Their versions now appear to be linked to a Python version and the miniconda version (e.g. `miniconda3-3.12-24.1.2-0`).
## Describe the solution you'd like
We are close to June, which is our 6-month policy update for all build tools, so we can maybe wait until then to update it. Or just make one exception, as our policy mentions.
## Alternative solutions
Maybe find a way to manually upgrade miniconda for users that need it?
## Additional context
We have one user who is not able to build their docs; a probable cause is the version of miniconda. See https://github.com/numba/pixie/pull/12.
| > We are close to June, that's our 6 months policy update for all build tools, so we can maybe wait until that to update it
Yeah, I think it's fine to update them all in June. We are only one week away from it and we need to do the work, review, deploy, etc still.
Please let me know if you need any details about this (this also ships with the mamba solver as the new backend for conda, making it almost as fast as mamba CLI). Blog post: https://conda.org/blog/2023-11-06-conda-23-10-0-release
We are using `asdf` (https://asdf-vm.com/) to install these versions, so we will need to use the following ones as latest(s):
```
▶ asdf list all python | grep miniconda | grep 24\.
miniconda3-3.9-24.1.2-0
miniconda3-3.10-24.1.2-0
miniconda3-3.11-24.1.2-0
miniconda3-3.12-24.1.2-0
```
```
▶ asdf list all python | grep mambaforge | grep 23\.
mambaforge-23.1.0-0
mambaforge-23.1.0-1
mambaforge-23.1.0-2
mambaforge-23.1.0-3
mambaforge-23.1.0-4
mambaforge-23.3.0-0
mambaforge-23.3.1-0
mambaforge-23.3.1-1
mambaforge-23.10.0-0
mambaforge-23.11.0-0
``` | 2024-06-06T17:58:32 |
readthedocs/readthedocs.org | 11,410 | readthedocs__readthedocs.org-11410 | [
"11409"
] | 05809349bcd78e4e31424266ed2bc6811e29c286 | diff --git a/readthedocs/notifications/messages.py b/readthedocs/notifications/messages.py
--- a/readthedocs/notifications/messages.py
+++ b/readthedocs/notifications/messages.py
@@ -232,8 +232,9 @@ def get_rendered_body(self):
header=_("No HTML content found"),
body=_(
textwrap.dedent(
- f"""
- No "{BUILD_COMMANDS_OUTPUT_PATH_HTML}" folder was created during this build.
+ """
+ No content was output to the path "$READTHEDOCS_OUTPUT/html".
+ Read more about <a href="https://docs.readthedocs.io/page/build-customization.html#where-to-put-files">where to put your built files</a>.
"""
).strip(),
),
| Notification: clarify _readthedocs/html path solution
If you don't output a file during the build, you will see the following notification:

However, if you try outputting to this path without using the environment variable, you'll get a path not found error:
```
echo "Foo" > _readthedocs/html/index.html
/bin/sh: 1: cannot create _readthedocs/html/index.html: Directory nonexistent
```
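For comparison, a command sequence that does work writes into the path exposed by the environment variable — a sketch, with purely illustrative file content:

```
mkdir -p "$READTHEDOCS_OUTPUT/html"
echo "Foo" > "$READTHEDOCS_OUTPUT/html/index.html"
```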
- [ ] Maybe mention the environment variable in the error notification, in addition to or instead of `_readthedocs/html`.
- [ ] Point to the documentation we have on this environment variable
| 2024-06-13T08:16:22 |
||
readthedocs/readthedocs.org | 11,411 | readthedocs__readthedocs.org-11411 | [
"11405"
] | 8d91addb4305bcd7caeea1f624b7e1ea701fbe8f | diff --git a/readthedocs/doc_builder/director.py b/readthedocs/doc_builder/director.py
--- a/readthedocs/doc_builder/director.py
+++ b/readthedocs/doc_builder/director.py
@@ -418,10 +418,6 @@ def check_old_output_directory(self):
def run_build_commands(self):
"""Runs each build command in the build environment."""
- self.attach_notification(
- message_id=BuildUserError.BUILD_COMMANDS_IN_BETA,
- )
-
reshim_commands = (
{"pip", "install"},
{"conda", "create"},
| Build: remove "beta" warning from `build.commands`
`build.commands` has been behaving as a stable feature for a long time and we want to remove the "beta" framing around this since it produces some concerns on users regarding its future stability and the possibility of deprecation in the future.
Some places that I remember where this appears:
- Documentation
- Build notification
| 2024-06-13T09:08:24 |
||
readthedocs/readthedocs.org | 11,421 | readthedocs__readthedocs.org-11421 | [
"11072"
] | 1ad7a38b163b916b929791d87746eaf6995ab371 | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -25,6 +25,7 @@
"os": {
"ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
"ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
+ "ubuntu-24.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04",
},
# Mapping of build.tools options to specific versions.
"tools": {
@@ -75,7 +76,11 @@
# Set latest aliases for OS and tools.
_OS = RTD_DOCKER_BUILD_SETTINGS["os"]
_TOOLS = RTD_DOCKER_BUILD_SETTINGS["tools"]
+
+# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we have tested it
+# in production after some weeks
_OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
+
_TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
_TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
_TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
| diff --git a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
--- a/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
+++ b/readthedocs/rtd_tests/fixtures/spec/v2/schema.json
@@ -61,6 +61,7 @@
"enum": [
"ubuntu-20.04",
"ubuntu-22.04",
+ "ubuntu-24.04",
"ubuntu-lts-latest"
]
},
| Build: expose `ubuntu-24.04` as an option for `build.os`
We are close to the Ubuntu 24.04 release. We should expose it to our users.
| Ubuntu 24.04 is planned to be released on April 25 | 2024-06-17T13:56:29 |
readthedocs/readthedocs.org | 11,429 | readthedocs__readthedocs.org-11429 | [
"11428"
] | 299ba15c6536e4178173eed67c77d13efda13055 | diff --git a/readthedocs/projects/constants.py b/readthedocs/projects/constants.py
--- a/readthedocs/projects/constants.py
+++ b/readthedocs/projects/constants.py
@@ -310,6 +310,11 @@
re.escape(code)
for code in LANGUAGE_CODES + list(OLD_LANGUAGES_CODE_MAPPING.values())
]
+ # Add "zh" here to be able to keep serving projects with this old invalid language code.
+ # We don't allow new projects to select this language code anymore.
+ #
+ # https://github.com/readthedocs/readthedocs.org/issues/11428
+ + ["zh"]
)
PROGRAMMING_LANGUAGES = (
| diff --git a/readthedocs/proxito/tests/test_full.py b/readthedocs/proxito/tests/test_full.py
--- a/readthedocs/proxito/tests/test_full.py
+++ b/readthedocs/proxito/tests/test_full.py
@@ -109,6 +109,17 @@ def test_translation_serving(self):
"/proxito/media/html/translation/latest/awesome.html",
)
+ def test_translation_zh_deprecated_code_serving(self):
+ self.translation.language = "zh"
+ self.translation.save()
+ url = "/zh/latest/awesome.html"
+ host = "project.dev.readthedocs.io"
+ resp = self.client.get(url, headers={"host": host})
+ self.assertEqual(
+ resp["x-accel-redirect"],
+ "/proxito/media/html/translation/latest/awesome.html",
+ )
+
def test_normal_serving(self):
url = "/en/latest/awesome.html"
host = "project.dev.readthedocs.io"
| Document serve failed today
## Details
* Read the Docs project URL: https://readthedocs.org/projects/{your_project_slug}/
* Build URL (if applicable): https://readthedocs.org/projects/{your_project_slug}/builds/{build_id}/
* Read the Docs username (if applicable): https://readthedocs.org/profiles/HydrogenSulfate/
## Expected Result
*A description of what you wanted to happen*
Today our documentation cannot be accessed and returns a 404: <https://paddlescience-docs.readthedocs.io/zh/latest/>. But everything was fine yesterday. Can anyone help?
## Actual Result
*A description of what actually happened*
| Hi, sorry about that. Looks like a bug that was introduced in https://github.com/readthedocs/readthedocs.org/pull/11385. We are discussing how to fix this. | 2024-06-19T14:15:55 |
holoviz/hvplot | 85 | holoviz__hvplot-85 | [
"82"
] | d42b9566f97b55717c92c063338831c04c30a688 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -145,7 +145,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
self.group_label = group_label
self.dynamic = dynamic
self.geo = geo or crs or global_extent or projection
- self.crs = process_crs(crs) if self.geo else None
+ self.crs = self._process_crs(data, crs) if self.geo else None
self.row = row
self.col = col
@@ -239,6 +239,21 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
'y: {y}, by: {by}, groupby: {groupby}'.format(**kwds))
+ def _process_crs(self, data, crs):
+ """Given crs as proj4 string, data.attr, or cartopy.crs return cartopy.crs
+ """
+ # get the proj string: either the value of data.attrs[crs] or crs itself
+ _crs = getattr(data, 'attrs', {}).get(crs or 'crs', crs)
+ try:
+ return process_crs(_crs)
+ except ValueError:
+ # only raise error if crs was specified in kwargs
+ if crs:
+ raise ValueError(
+ "'{}' must be either a valid crs or an reference to "
+ "a `data.attr` containing a valid crs.".format(crs))
+
+
def _process_data(self, kind, data, x, y, by, groupby, row, col,
use_dask, persist, backlog, label, value_label,
hover_cols, kwds):
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/testgeo.py
@@ -0,0 +1,48 @@
+from unittest import TestCase, SkipTest
+
+
+class TestGeo(TestCase):
+ def setUp(self):
+ try:
+ import xarray as xr
+ import rasterio # noqa
+ import geoviews # noqa
+ import cartopy.crs as ccrs
+ except:
+ raise SkipTest('xarray, rasterio, geoviews, or cartopy not available')
+ import hvplot.xarray # noqa
+ self.da = (xr.open_rasterio(
+ 'https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif')
+ .sel(band=1))
+ self.crs = ccrs.epsg(self.da.crs.split('epsg:')[1])
+
+ def assertCRS(self, plot, proj='utm'):
+ assert plot.crs.proj4_params['proj'] == proj
+
+ def test_plot_with_crs_as_object(self):
+ plot = self.da.hvplot('x', 'y', crs=self.crs)
+ self.assertCRS(plot)
+
+ def test_plot_with_crs_as_proj_string(self):
+ plot = self.da.hvplot('x', 'y', crs=self.da.crs)
+ self.assertCRS(plot)
+
+ def test_plot_with_geo_as_true_crs_undefined(self):
+ plot = self.da.hvplot('x', 'y', geo=True)
+ self.assertCRS(plot)
+
+ def test_plot_with_crs_as_attr_str(self):
+ da = self.da.copy()
+ da.attrs = {'bar': self.crs}
+ plot = da.hvplot('x', 'y', crs='bar')
+ self.assertCRS(plot)
+
+ def test_plot_with_crs_as_nonexistent_attr_str(self):
+ with self.assertRaisesRegex(ValueError, "'foo' must be"):
+ self.da.hvplot('x', 'y', crs='foo')
+
+ def test_plot_with_geo_as_true_crs_no_crs_on_data_returns_default(self):
+ da = self.da.copy()
+ da.attrs = {'bar': self.crs}
+ plot = da.hvplot('x', 'y', geo=True)
+ self.assertCRS(plot, 'eqc')
| `geo=True` should look for `crs` on `xarray.dataset` if none provided
This came out of a conversation with @philippjfr. Potentially `crs` could also accept a string which would refer to an `attr` or I guess a `coord` containing the `crs` info. This would be a little trickier to implement since `crs` already accepts a proj4 string. But I think it'd be nicely parallel to how x, y, and z are set.
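A usage sketch of what that could look like — the file and attribute names are only illustrative:

```python
import xarray as xr
import cartopy.crs as ccrs
import hvplot.xarray  # noqa

da = xr.open_rasterio("RGB.byte.tif").sel(band=1)  # any projected raster

# Already supported: pass a cartopy CRS (or a proj4 string) directly.
da.hvplot.image("x", "y", crs=ccrs.UTM(18))

# Proposed: pass the name of an attribute holding the CRS, mirroring how
# x, y, and z refer to coordinates/attributes on the object.
da.attrs["my_crs"] = ccrs.UTM(18)
da.hvplot.image("x", "y", crs="my_crs")
```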
| 2018-10-05T19:23:31 |
|
holoviz/hvplot | 126 | holoviz__hvplot-126 | [
"125"
] | d41cce06b12895435c7616378b853ba83249dd51 | diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -237,18 +237,21 @@ def is_geopandas(data):
def process_xarray(data, x, y, by, groupby, use_dask, persist, gridded, label, value_label):
import xarray as xr
- dataset = data
- data_vars = list(dataset.data_vars) if isinstance(data, xr.Dataset) else [data.name]
+ if isinstance(data, xr.Dataset):
+ dataset = data
+ else:
+ name = data.name or label or value_label
+ dataset = data.to_dataset(name=name)
+
+ data_vars = list(dataset.data_vars)
ignore = (by or []) + (groupby or [])
- dims = [c for c in data.coords if data[c].shape != () and c not in ignore][::-1]
- index_dims = [d for d in dims if d in data.indexes]
+ dims = [c for c in dataset.coords if dataset[c].shape != () and c not in ignore][::-1]
+ index_dims = [d for d in dims if d in dataset.indexes]
if gridded:
data = dataset
- if data_vars == [None]:
- label = label or value_label
- data = data.to_dataset(name=label)
- data_vars = [label]
+ if len(dims) < 2:
+ dims += [dim for dim in list(data.dims)[::-1] if dim not in dims]
if not (x or y):
x, y = index_dims[:2] if len(index_dims) > 1 else dims[:2]
elif x and not y:
@@ -259,19 +262,15 @@ def process_xarray(data, x, y, by, groupby, use_dask, persist, gridded, label, v
dims = list(data.coords[x].dims) + list(data.coords[y].dims)
groupby = [d for d in index_dims if d not in (x, y) and d not in dims]
else:
- name = None
- if not isinstance(dataset, xr.Dataset):
- name = dataset.name or label or value_label
- data_vars = [name]
if use_dask:
- if not isinstance(dataset, xr.Dataset):
- dataset = dataset.to_dataset(name=name)
data = dataset.to_dask_dataframe()
data = data.persist() if persist else data
else:
- data = dataset.to_dataframe(name=name)
+ data = dataset.to_dataframe()
if len(data.index.names) > 1:
data = data.reset_index()
+ if len(dims) == 0:
+ dims = ['index']
if x and not y:
y = dims[0] if x in data_vars else data_vars
elif y and not x:
| diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/testutil.py
@@ -0,0 +1,149 @@
+"""
+Tests utilities to convert data and projections
+"""
+import numpy as np
+
+from unittest import TestCase, SkipTest
+
+from hvplot.util import process_xarray # noqa
+
+
+class TestProcessXarray(TestCase):
+
+ def setUp(self):
+ try:
+ import xarray as xr
+ import pandas as pd # noqa
+ except:
+ raise SkipTest('xarray or pandas not available')
+ self.default_kwargs = {
+ 'value_label': 'value',
+ 'label': None,
+ 'gridded': False,
+ 'persist': False,
+ 'use_dask': False,
+ 'groupby': None,
+ 'y': None,
+ 'x': None,
+ 'by': None
+ }
+ self.ds = xr.tutorial.open_dataset('air_temperature')
+
+ def test_process_1d_xarray_dataarray_with_no_coords(self):
+ import xarray as xr
+ import pandas as pd
+
+ da = xr.DataArray(
+ data=[1, 2, 3])
+
+ data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'index'
+ assert y == ['value']
+ assert by == []
+ assert groupby == []
+
+ def test_process_1d_xarray_dataarray_with_coords(self):
+ import xarray as xr
+ import pandas as pd
+
+ da = xr.DataArray(
+ data=[1, 2, 3],
+ coords={'day': [5, 6, 7]},
+ dims=['day'])
+
+ data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'day'
+ assert y == ['value']
+ assert by == []
+ assert groupby == []
+
+ def test_process_1d_xarray_dataarray_with_coords_and_name(self):
+ import xarray as xr
+ import pandas as pd
+
+ da = xr.DataArray(
+ data=[1, 2, 3],
+ coords={'day': [5, 6, 7]},
+ dims=['day'],
+ name='temp')
+
+ data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'day'
+ assert y == ['temp']
+ assert by == []
+ assert groupby == []
+
+ def test_process_2d_xarray_dataarray_with_no_coords(self):
+ import xarray as xr
+ import pandas as pd
+
+ da = xr.DataArray(np.random.randn(4,5))
+
+ data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'index'
+ assert y == ['value']
+ assert by == []
+ assert groupby == []
+
+ def test_process_2d_xarray_dataarray_with_no_coords_as_gridded(self):
+ import xarray as xr
+
+ da = xr.DataArray(np.random.randn(4,5))
+
+ kwargs = self.default_kwargs
+ kwargs.update(gridded=True)
+
+ data, x, y, by, groupby = process_xarray(data=da, **kwargs)
+ assert isinstance(data, xr.Dataset)
+ assert list(data.data_vars.keys()) == ['value']
+ assert x == 'dim_1'
+ assert y == 'dim_0'
+ assert by is None
+ assert groupby is None
+
+ def test_process_2d_xarray_dataarray_with_coords_as_gridded(self):
+ import xarray as xr
+
+ da = xr.DataArray(
+ data=np.random.randn(4,5),
+ coords={'y': [3, 4, 5, 6, 7]},
+ dims=['x', 'y'])
+
+ kwargs = self.default_kwargs
+ kwargs.update(gridded=True)
+
+ data, x, y, by, groupby = process_xarray(data=da, **kwargs)
+ assert isinstance(data, xr.Dataset)
+ assert list(data.data_vars.keys()) == ['value']
+ assert x == 'y'
+ assert y == 'x'
+ assert by is None
+ assert groupby is None
+
+ def test_process_3d_xarray_dataset_with_coords(self):
+ import pandas as pd
+
+ data, x, y, by, groupby = process_xarray(data=self.ds, **self.default_kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'time'
+ assert y == ['air']
+ assert by == []
+ assert groupby == ['lon', 'lat']
+
+ def test_process_3d_xarray_dataset_with_coords_as_gridded(self):
+ import xarray as xr
+
+ kwargs = self.default_kwargs
+ kwargs.update(gridded=True, x='lon', y='lat')
+
+ data, x, y, by, groupby = process_xarray(data=self.ds, **kwargs)
+ assert isinstance(data, xr.Dataset)
+ assert list(data.data_vars.keys()) == ['air']
+ assert x == 'lon'
+ assert y == 'lat'
+ assert by is None
+ assert groupby == ['time']
| Getting an error on to_dataframe from a basic xarray.hvplot
xarray on latest master: 0.11.0+11.g3ae93ac
```python
import xarray as xr
import hvplot.xarray
ds = xr.tutorial.open_dataset('air_temperature')
ds.hvplot()
```
```python-traceback
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-95a80b7187c4> in <module>()
----> 1 ds.hvplot()
/Users/jsignell/hvplot/hvplot/__init__.py in __call__(self, x, y, kind, **kwds)
98 HoloViews object: Object representing the requested visualization
99 """
--> 100 return self._get_converter(x, y, kind, **kwds)(kind, x, y)
101
102 def _get_converter(self, x=None, y=None, kind=None, **kwds):
/Users/jsignell/hvplot/hvplot/__init__.py in _get_converter(self, x, y, kind, **kwds)
106 kind = kind or params.pop('kind', None)
107 return HoloViewsConverter(
--> 108 self._data, x, y, kind=kind, **params
109 )
110
/Users/jsignell/hvplot/hvplot/converter.py in __init__(self, data, x, y, kind, by, use_index, group_label, value_label, backlog, persist, use_dask, crs, fields, groupby, dynamic, width, height, shared_axes, grid, legend, rot, title, xlim, ylim, clim, xticks, yticks, logx, logy, loglog, hover, subplots, label, invert, stacked, colorbar, fontsize, colormap, datashade, rasterize, row, col, figsize, debug, xaxis, yaxis, framewise, aggregator, projection, global_extent, geo, precompute, flip_xaxis, flip_yaxis, dynspread, hover_cols, x_sampling, y_sampling, project, xlabel, ylabel, xformatter, yformatter, tools, **kwds)
150 self._process_data(kind, data, x, y, by, groupby, row, col,
151 use_dask, persist, backlog, label, value_label,
--> 152 hover_cols, kwds)
153 self.use_index = use_index
154 self.value_label = value_label
/Users/jsignell/hvplot/hvplot/converter.py in _process_data(self, kind, data, x, y, by, groupby, row, col, use_dask, persist, backlog, label, value_label, hover_cols, kwds)
372 data, x, y, by_new, groupby_new = process_xarray(data, x, y, by, groupby,
373 use_dask, persist, gridded,
--> 374 label, value_label)
375
376 if kind not in self._stats_types:
/Users/jsignell/hvplot/hvplot/util.py in process_xarray(data, x, y, by, groupby, use_dask, persist, gridded, label, value_label)
270 data = data.persist() if persist else data
271 else:
--> 272 data = dataset.to_dataframe(name=name)
273 if len(data.index.names) > 1:
274 data = data.reset_index()
TypeError: to_dataframe() got an unexpected keyword argument 'name'
```
| 2018-12-07T18:45:16 |
|
holoviz/hvplot | 131 | holoviz__hvplot-131 | [
"129"
] | b233531a639919f7505b79fec0494deecfd938cd | diff --git a/hvplot/__init__.py b/hvplot/__init__.py
--- a/hvplot/__init__.py
+++ b/hvplot/__init__.py
@@ -68,7 +68,7 @@ def patch(library, name='hvplot', extension=None, logo=False):
raise ImportError('Could not patch plotting API onto intake. '
'intake could not be imported.')
setattr(intake.source.base.DataSource, name, patch_property)
- if extension and not _hv.extension._loaded:
+ if extension and not getattr(_hv.extension, '_loaded', False):
_hv.extension(extension, logo=logo)
diff --git a/hvplot/intake.py b/hvplot/intake.py
--- a/hvplot/intake.py
+++ b/hvplot/intake.py
@@ -11,5 +11,5 @@
patch('intake', extension='bokeh')
patch('intake', 'plot')
else:
- if not _hv.extension._loaded:
+ if not getattr(_hv.extension, '_loaded', False):
_hv.extension('bokeh', logo=False)
| `import hvplot.pandas` fails with v0.3.0a7
Importing `hvplot.pandas` with
```python
import hvplot
print(hvplot.__version__) # '0.3.0a7'
import hvplot.pandas
```
results in an exception:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/laurent/miniconda3/envs/hvplot-env/lib/python3.7/site-packages/hvplot/pandas.py", line 3, in <module>
patch('pandas', extension='bokeh')
File "/Users/laurent/miniconda3/envs/hvplot-env/lib/python3.7/site-packages/hvplot/__init__.py", line 71, in patch
if extension and not _hv.extension._loaded:
AttributeError: type object 'extension' has no attribute '_loaded'
```
Update: the above exception is raised for both holoviews 1.10.9 and 1.11.0a10.
| For those interested: the workaround I'm using is `from hvplot import hvPlot`, which obviates the need to monkey patch pandas.
For some additional background, which environment are you using hvplot with? I'm guessing the problem is that it's not a IPython or notebook environment, in which case there is no ``_loaded`` attribute defined. | 2018-12-19T13:59:58 |
|
holoviz/hvplot | 228 | holoviz__hvplot-228 | [
"227"
] | c4d29db1168cfdc127d90c95a8139e3923ec1a5c | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -1272,8 +1272,9 @@ def points(self, x=None, y=None, data=None):
data = self.data if data is None else data
params = dict(self._relabel)
- x = x or self.x
- y = y or self.y
+ x = x if x is not None else self.x
+ y = y if y is not None else self.y
+
if hasattr(data, 'geom_type') and not (x and y):
x, y = 'Longitude', 'Latitude'
@@ -1291,11 +1292,12 @@ def points(self, x=None, y=None, data=None):
element = self._get_element('points')
if self.geo: params['crs'] = self.crs
vdims = [self.kwds['c']] if 'c' in self.kwds else []
+ kdims = [x, y] if x is not None and y is not None else None
if 's' in self.kwds:
vdims.append(self.kwds['s'])
vdims = vdims + self.hover_cols
params['vdims'] = vdims
- return element(data, [x, y], **params).redim(**self._redim).redim.range(**ranges).opts(**opts)
+ return element(data, kdims, **params).redim(**self._redim).redim.range(**ranges).opts(**opts)
##########################
# Geometry plots #
| diff --git a/hvplot/tests/testcharts.py b/hvplot/tests/testcharts.py
--- a/hvplot/tests/testcharts.py
+++ b/hvplot/tests/testcharts.py
@@ -2,10 +2,35 @@
from parameterized import parameterized
from holoviews import NdOverlay, Store
-from holoviews.element import Curve, Area, Scatter
+from holoviews.element import Curve, Area, Scatter, Points
from holoviews.element.comparison import ComparisonTestCase
from hvplot import patch
+class TestChart2D(ComparisonTestCase):
+ def setUp(self):
+ try:
+ import pandas as pd
+ except:
+ raise SkipTest('Pandas not available')
+ patch('pandas')
+ self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])
+ self.cat_df = pd.DataFrame([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']],
+ columns=['x', 'y', 'category'])
+
+ @parameterized.expand([('points', Points)])
+ def test_tidy_chart_defaults(self, kind, element):
+ plot = self.df.hvplot(kind=kind)
+ self.assertEqual(plot, element(self.df))
+
+ @parameterized.expand([('points', Points)])
+ def test_tidy_chart(self, kind, element):
+ plot = self.df.hvplot(x='x', y='y', kind=kind)
+ self.assertEqual(plot, element(self.df, ['x', 'y']))
+
+ @parameterized.expand([('points', Points)])
+ def test_tidy_chart_index_and_c(self, kind, element):
+ plot = self.df.hvplot(x='index', y='y', c='x', kind=kind)
+ self.assertEqual(plot, element(self.df, ['index', 'y'], ['x']))
class TestChart1D(ComparisonTestCase):
| points is broken with holoviews 1.12.3 bokeh 1.2.0
```python
pd.DataFrame([[1,2], [3,4]]).hvplot.points()
```
```python-traceback
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-32-c78e14955632> in <module>
----> 1 pd.DataFrame([[1,2], [3,4]]).hvplot.points()
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/hvplot/__init__.py in points(self, x, y, **kwds)
643 The HoloViews representation of the plot.
644 """
--> 645 return self(x, y, kind='points', **kwds)
646
647 def polygons(self, x=None, y=None, c=None, **kwds):
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/hvplot/__init__.py in __call__(self, x, y, kind, **kwds)
181 HoloViews object: Object representing the requested visualization
182 """
--> 183 return self._get_converter(x, y, kind, **kwds)(kind, x, y)
184
185 def _get_converter(self, x=None, y=None, kind=None, **kwds):
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/hvplot/converter.py in __call__(self, kind, x, y)
717 obj = DynamicMap(cbcallable, streams=[self.stream])
718 else:
--> 719 obj = method(x, y)
720
721 if self.crs and self.project:
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/hvplot/converter.py in points(self, x, y, data)
1277 vdims = vdims + self.hover_cols
1278 params['vdims'] = vdims
-> 1279 return element(data, [x, y], **params).redim(**self._redim).redim.range(**ranges).opts(**opts)
1280
1281 ##########################
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/holoviews/core/data/__init__.py in __init__(self, data, kdims, vdims, **kwargs)
204 kwargs.update([(l, pvals[l]) for l in ['group', 'label']
205 if l in pvals and l not in kwargs])
--> 206 kwargs.update(process_dimensions(kdims, vdims))
207 kdims, vdims = kwargs.get('kdims'), kwargs.get('vdims')
208
~/scratch/examples/datashader_dashboard/envs/default/lib/python3.6/site-packages/holoviews/core/dimension.py in process_dimensions(kdims, vdims)
125 raise ValueError('Dimensions must be defined as a tuple, '
126 'string, dictionary or Dimension instance, '
--> 127 'found a %s type.' % type(dim).__name__)
128 dimensions[group] = [asdim(d) for d in dims]
129 return dimensions
ValueError: Dimensions must be defined as a tuple, string, dictionary or Dimension instance, found a NoneType type.
```
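With the patch above applied, both the implicit and explicit forms work — a small sketch mirroring the new tests:

```python
import pandas as pd
import hvplot.pandas  # noqa

df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["x", "y"])

df.hvplot.points()          # key dimensions are inferred from the frame
df.hvplot.points("x", "y")  # explicit key dimensions still work
```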
| Turns out points requires x and y args. I am opening a PR to make that requirement raise a better error. | 2019-06-25T15:29:50 |
holoviz/hvplot | 247 | holoviz__hvplot-247 | [
"245"
] | 6ec2a6b0882e3d70a814ba266e21974f7e471edd | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -139,6 +139,7 @@ def package_assets(example_path):
'dask',
'datashader >=0.6.5',
'notebook >=5.4',
+ 'rasterio',
's3fs',
'scipy',
'pillow',
@@ -153,7 +154,7 @@ def package_assets(example_path):
'flake8',
'parameterized',
'pytest',
- 'nbsmoke >=0.2.0'
+ 'nbsmoke >=0.2.0',
],
'examples': _examples_extra,
'doc': _examples_extra + [
@@ -224,4 +225,3 @@ def package_assets(example_path):
if os.path.isdir(example_path):
shutil.rmtree(example_path)
-
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -20,29 +20,29 @@ def assertCRS(self, plot, proj='utm'):
assert plot.crs.proj4_params['proj'] == proj
def test_plot_with_crs_as_object(self):
- plot = self.da.hvplot('x', 'y', crs=self.crs)
+ plot = self.da.hvplot.image('x', 'y', crs=self.crs)
self.assertCRS(plot)
def test_plot_with_crs_as_proj_string(self):
- plot = self.da.hvplot('x', 'y', crs=self.da.crs)
+ plot = self.da.hvplot.image('x', 'y', crs=self.da.crs)
self.assertCRS(plot)
def test_plot_with_geo_as_true_crs_undefined(self):
- plot = self.da.hvplot('x', 'y', geo=True)
+ plot = self.da.hvplot.image('x', 'y', geo=True)
self.assertCRS(plot)
def test_plot_with_crs_as_attr_str(self):
da = self.da.copy()
da.attrs = {'bar': self.crs}
- plot = da.hvplot('x', 'y', crs='bar')
+ plot = da.hvplot.image('x', 'y', crs='bar')
self.assertCRS(plot)
def test_plot_with_crs_as_nonexistent_attr_str(self):
with self.assertRaisesRegex(ValueError, "'foo' must be"):
- self.da.hvplot('x', 'y', crs='foo')
+ self.da.hvplot.image('x', 'y', crs='foo')
def test_plot_with_geo_as_true_crs_no_crs_on_data_returns_default(self):
da = self.da.copy()
da.attrs = {'bar': self.crs}
- plot = da.hvplot('x', 'y', geo=True)
+ plot = da.hvplot.image('x', 'y', geo=True)
self.assertCRS(plot, 'eqc')
diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -25,7 +25,8 @@ def setUp(self):
'groupby': None,
'y': None,
'x': None,
- 'by': None
+ 'by': None,
+ 'other_dims': [],
}
self.ds = xr.tutorial.open_dataset('air_temperature')
| Run more tests on CI
I just realized that we haven't been running all our unit tests on CI. I think we should at least have one job that has `xarray, rasterio, geoviews, cartopy, dask, pandas, and streamz`
This will probably also help with our coverage. Subset from https://travis-ci.org/pyviz/hvplot/jobs/560028289:
```
Name Stmts Miss Cover
-----------------------------------------
hvplot/__init__.py 251 123 51%
hvplot/converter.py 900 537 40%
hvplot/util.py 193 156 19%
hvplot/utilities.py 28 22 21%
-----------------------------------------
TOTAL 1372 838 39%
----------------------------------------------------------------------
Ran 74 tests in 1.296s
OK (SKIP=27)
```
These ones are failing locally for me on master:
```python-traceback
test_process_1d_xarray_dataarray_with_coords (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_1d_xarray_dataarray_with_coords_and_name (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_1d_xarray_dataarray_with_no_coords (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_2d_xarray_dataarray_with_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_2d_xarray_dataarray_with_no_coords (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_2d_xarray_dataarray_with_no_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_3d_xarray_dataset_with_coords (hvplot.tests.testutil.TestProcessXarray) ... ERROR
test_process_3d_xarray_dataset_with_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray) ... ERROR
======================================================================
ERROR: test_process_1d_xarray_dataarray_with_coords (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 55, in test_process_1d_xarray_dataarray_with_coords
data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_1d_xarray_dataarray_with_coords_and_name (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 72, in test_process_1d_xarray_dataarray_with_coords_and_name
data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_1d_xarray_dataarray_with_no_coords (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 39, in test_process_1d_xarray_dataarray_with_no_coords
data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_2d_xarray_dataarray_with_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 119, in test_process_2d_xarray_dataarray_with_coords_as_gridded
data, x, y, by, groupby = process_xarray(data=da, **kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_2d_xarray_dataarray_with_no_coords (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 85, in test_process_2d_xarray_dataarray_with_no_coords
data, x, y, by, groupby = process_xarray(data=da, **self.default_kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_2d_xarray_dataarray_with_no_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 100, in test_process_2d_xarray_dataarray_with_no_coords_as_gridded
data, x, y, by, groupby = process_xarray(data=da, **kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_3d_xarray_dataset_with_coords (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 130, in test_process_3d_xarray_dataset_with_coords
data, x, y, by, groupby = process_xarray(data=self.ds, **self.default_kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
======================================================================
ERROR: test_process_3d_xarray_dataset_with_coords_as_gridded (hvplot.tests.testutil.TestProcessXarray)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/jsignell/hvplot/hvplot/tests/testutil.py", line 143, in test_process_3d_xarray_dataset_with_coords_as_gridded
data, x, y, by, groupby = process_xarray(data=self.ds, **kwargs)
TypeError: process_xarray() missing 1 required positional argument: 'other_dims'
----------------------------------------------------------------------
Ran 80 tests in 8.333s
FAILED (SKIP=10, errors=8)
```
| 2019-07-17T19:41:46 |
|
holoviz/hvplot | 249 | holoviz__hvplot-249 | [
"241"
] | 56605e58d22d28721c72f63645531399093c3404 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -919,6 +919,12 @@ def _process_args(self, data, x, y):
y = y or self.y
if not y:
ys = [c for c in data.columns if c not in [x]+self.by+self.groupby]
+ if len(ys) > 1:
+ # if columns have different dtypes, only include numeric columns
+ from pandas.api.types import is_numeric_dtype as isnum
+ num_ys = [col for col in data.columns if isnum(data[col])]
+ if len(num_ys) >= 1:
+ ys = num_ys
y = ys[0] if len(ys) == 1 else ys
return data, x, y
| diff --git a/hvplot/tests/testcharts.py b/hvplot/tests/testcharts.py
--- a/hvplot/tests/testcharts.py
+++ b/hvplot/tests/testcharts.py
@@ -36,9 +36,9 @@ def test_heatmap_2d_index_columns(self):
plot = self.df.hvplot.heatmap()
self.assertEqual(plot, HeatMap((['x', 'y'], [0, 1, 2], self.df.values),
['columns', 'index'], 'value'))
-
-
+
+
class TestChart1D(ComparisonTestCase):
def setUp(self):
@@ -50,6 +50,8 @@ def setUp(self):
self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])
self.cat_df = pd.DataFrame([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']],
columns=['x', 'y', 'category'])
+ self.cat_only_df = pd.DataFrame([['A', 'a'], ['B', 'b'], ['C', 'c']],
+ columns=['upper', 'lower'])
@parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
def test_wide_chart(self, kind, element):
@@ -134,3 +136,32 @@ def test_histogram_legend_position(self):
plot = self.cat_df.hvplot.hist('y', legend='left')
opts = Store.lookup_options('bokeh', plot, 'plot')
self.assertEqual(opts.kwargs['legend_position'], 'left')
+
+ @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
+ def test_only_includes_num_chart(self, kind, element):
+ plot = self.cat_df.hvplot(kind=kind)
+ obj = NdOverlay({'x': element(self.cat_df, 'index', 'x').redim(x='value'),
+ 'y': element(self.cat_df, 'index', 'y').redim(y='value'),
+ }, 'Variable')
+ self.assertEqual(plot, obj)
+
+ @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
+ def test_includes_str_if_no_num_chart(self, kind, element):
+ plot = self.cat_only_df.hvplot(kind=kind)
+ obj = NdOverlay({'upper': element(self.cat_only_df, 'index', 'upper').redim(upper='value'),
+ 'lower': element(self.cat_only_df, 'index', 'lower').redim(lower='value'),
+ }, 'Variable')
+ self.assertEqual(plot, obj)
+
+class TestChart1DDask(TestChart1D):
+
+ def setUp(self):
+ super().setUp()
+ try:
+ import dask.dataframe as dd
+ except:
+ raise SkipTest('Dask not available')
+ patch('dask')
+ self.df = dd.from_pandas(self.df, npartitions=2)
+ self.cat_df = dd.from_pandas(self.cat_df, npartitions=3)
+ self.cat_only_df = dd.from_pandas(self.cat_only_df, npartitions=1)
| Line plot should ignore non-numeric column
I got this example:
```python
df = pandas.DataFrame({'foo': [1, 2, 3, 4],
'bar': [3, 1, 4, 2],
'label': list('xyzh')},
index=list('abcd'))
df.hvplot(kind='line')
```
This generates the following plot:

Not sure about other cases, but I think in this case it'd make sense to ignore the `label` column, instead of adding its values to the y axis. That's what the pandas matplotlib backend does.
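For context, a minimal standalone sketch of the numeric-only selection used in the converter change above (illustrative only, not the actual hvplot code path) could look like this:
```python
import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pandas = pd.DataFrame({'foo': [1, 2, 3, 4],
                            'bar': [3, 1, 4, 2],
                            'label': list('xyzh')},
                           index=list('abcd'))

# Candidate y columns: when dtypes are mixed, keep only the numeric ones,
# mirroring the is_numeric_dtype check in the patch above.
ys = [col for col in df.columns if is_numeric_dtype(df[col])]
print(ys)  # ['foo', 'bar'] -> 'label' is ignored unless requested explicitly
```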
| One way to generalize this is that when data are plotted on the same axis they should all either be numeric or non-numeric.
>One way to generalize this is that when data are plotted on the same axis they should all either be numeric or non-numeric.
I agree with this; line plots on categorical axes are weird, but I think it's fine not to be prescriptive here. Unless a non-numeric column is explicitly specified, numeric columns should take precedence though. | 2019-07-19T19:10:53 |
holoviz/hvplot | 256 | holoviz__hvplot-256 | [
"232"
] | 4685be24e42f476a6d963a84abda6c588646bc06 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -164,6 +164,27 @@ class HoloViewsConverter(object):
returning an aggregated Image
x_sampling/y_sampling (default=None):
Declares a minimum sampling density beyond.
+
+ Geographic options
+ ------------------
+ coastline (default=False):
+ Whether to display a coastline on top of the plot, setting
+ coastline='10m'/'50m'/'110m' specifies a specific scale.
+ crs (default=None):
+ Coordinate reference system of the data specified as Cartopy
+ CRS object, proj.4 string or EPSG code.
+ geo (default=False):
+ Whether the plot should be treated as geographic (and assume
+ PlateCarree, i.e. lat/lon coordinates).
+ global_extent (default=False):
+ Whether to expand the plot extent to span the whole globe.
+ project (default=False):
+ Whether to project the data before plotting (adds initial
+ overhead but avoids projecting data when plot is dynamically
+ updated).
+ tiles (default=False):
+ Whether to overlay the plot on a tile source. Tiles sources
+ can be selected by name, the default is 'Wikipedia'.
"""
_gridded_types = ['image', 'contour', 'contourf', 'quadmesh', 'rgb', 'points']
@@ -249,7 +270,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
clabel=None, xformatter=None, yformatter=None, tools=[],
padding=None, responsive=False, min_width=None,
min_height=None, max_height=None, max_width=None,
- attr_labels=True, **kwds):
+ attr_labels=True, coastline=False, tiles=False, **kwds):
# Process data and related options
self._redim = fields
@@ -263,6 +284,8 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
self.geo = geo or crs or global_extent or projection or project
self.crs = self._process_crs(data, crs) if self.geo else None
self.project = project
+ self.coastline = coastline
+ self.tiles = tiles
self.row = row
self.col = col
@@ -809,7 +832,7 @@ def __call__(self, kind, x, y):
obj = project(obj, projection=projection)
if not (self.datashade or self.rasterize):
- return obj
+ return self._apply_layers(obj)
try:
from holoviews.operation.datashader import datashade, rasterize, dynspread
@@ -861,7 +884,31 @@ def __call__(self, kind, x, y):
else:
param.main.warning('dynspread may only be applied on datashaded plots, '
'use datashade=True instead of rasterize=True.')
- return processed.opts({eltype: {'plot': self._plot_opts, 'style': style}})
+ return self._apply_layers(processed).opts({eltype: {'plot': self._plot_opts, 'style': style}})
+
+ def _apply_layers(self, obj):
+ if self.coastline:
+ import geoviews as gv
+ coastline = gv.feature.coastline()
+ if self.coastline in ['10m', '50m', '110m']:
+ coastline = coastline.opts(scale=self.coastline)
+ elif self.coastline is not True:
+ param.main.warning("coastline scale of %s not recognized, "
+ "must be one of '10m', '50m' or '110m'." %
+ self.coastline)
+ obj = obj * coastline
+ if self.tiles:
+ tile_source = 'EsriImagery' if self.tiles == 'ESRI' else self.tiles
+ if tile_source in hv.element.tile_sources:
+ tiles = hv.element.tile_sources[tile_source]()
+ else:
+ tiles = hv.element.tiles.Wikipedia()
+ if tile_source is not True:
+ param.main.warning(
+ "%s tiles not recognized, must be one of: %s" %
+ (tile_source, sorted(hv.element.tile_sources)))
+ obj = tiles * obj
+ return obj
def _merge_redim(self, ranges, attr='range'):
redim = dict(self._redim)
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -2,6 +2,7 @@
import numpy as np
import pandas as pd
+import holoviews as hv
class TestGeo(TestCase):
@@ -53,6 +54,43 @@ def test_plot_with_geo_as_true_crs_no_crs_on_data_returns_default(self):
self.assertCRS(plot, 'eqc')
+class TestGeoAnnotation(TestCase):
+
+ def setUp(self):
+ try:
+ import geoviews # noqa
+ import cartopy.crs as ccrs # noqa
+ except:
+ raise SkipTest('geoviews or cartopy not available')
+ import hvplot.pandas # noqa
+ self.crs = ccrs.PlateCarree()
+ self.df = pd.DataFrame(np.random.rand(10, 2), columns=['x', 'y'])
+
+ def test_plot_with_coastline(self):
+ import geoviews as gv
+ plot = self.df.hvplot.points('x', 'y', geo=True, coastline=True)
+ self.assertEqual(len(plot), 2)
+ coastline = plot.get(1)
+ self.assertIsInstance(coastline, gv.Feature)
+
+ def test_plot_with_coastline_scale(self):
+ plot = self.df.hvplot.points('x', 'y', geo=True, coastline='10m')
+ opts = plot.get(1).opts.get('plot')
+ self.assertEqual(opts.kwargs, {'scale': '10m'})
+
+ def test_plot_with_tiles(self):
+ plot = self.df.hvplot.points('x', 'y', geo=True, tiles=True)
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), hv.Tiles)
+ self.assertIn('wikimedia', plot.get(0).data)
+
+ def test_plot_with_specific_tiles(self):
+ plot = self.df.hvplot.points('x', 'y', geo=True, tiles='ESRI')
+ self.assertEqual(len(plot), 2)
+ self.assertIsInstance(plot.get(0), hv.Tiles)
+ self.assertIn('ArcGIS', plot.get(0).data)
+
+
class TestGeoElements(TestCase):
def setUp(self):
| coastline=True keyword similar to grid?
I think it's helpful to be able to quickly add a frame of reference.
`ds.hvplot('lon', 'lat', geo=True, coastline=True)`
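As a rough sketch, the patch and tests above also accept a coastline scale and a tile source; the data below is synthetic, and geo=True assumes geoviews and cartopy are installed:
```python
import numpy as np
import pandas as pd
import hvplot.pandas  # noqa: registers the .hvplot accessor

df = pd.DataFrame(np.random.rand(10, 2) * 90, columns=['lon', 'lat'])

df.hvplot.points('lon', 'lat', geo=True, coastline='10m')  # specific coastline scale
df.hvplot.points('lon', 'lat', geo=True, tiles='ESRI')     # overlay on a tile source
```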
| I like this idea. | 2019-07-23T10:58:07 |
holoviz/hvplot | 258 | holoviz__hvplot-258 | [
"231"
] | 0041d4fbf74364f29a0757e04c11ad27786f8c6a | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -118,7 +118,7 @@ class HoloViewsConverter(object):
rot: number
Rotates the axis ticks along the x-axis by the specified
number of degrees.
- shared_axes (default=False): boolean
+ shared_axes (default=True): boolean
Whether to link axes between plots
title (default=''): str
Title for the plot
@@ -208,7 +208,7 @@ class HoloViewsConverter(object):
'yaxis', 'xformatter', 'yformatter', 'xlabel', 'ylabel',
'clabel', 'padding']
- _style_options = ['color', 'alpha', 'colormap', 'fontsize', 'c']
+ _style_options = ['color', 'alpha', 'colormap', 'fontsize', 'c', 'cmap']
_op_options = ['datashade', 'rasterize', 'x_sampling', 'y_sampling',
'aggregator']
@@ -252,26 +252,33 @@ class HoloViewsConverter(object):
"bottom_right", "right", "left", "top",
"bottom")
+ _default_plot_opts = {
+ 'logx': False, 'logy': False, 'show_legend': True, 'legend_position': 'right',
+ 'show_grid': False, 'responsive': False, 'shared_axes': True}
+
+ _default_cmaps = {
+ 'linear': 'kbc_r',
+ 'categorical': 'Category10',
+ 'cyclic': 'colorwheel',
+ 'diverging': 'coolwarm'
+ }
+
def __init__(self, data, x, y, kind=None, by=None, use_index=True,
group_label='Variable', value_label='value',
backlog=1000, persist=False, use_dask=False,
crs=None, fields={}, groupby=None, dynamic=True,
- width=None, height=None, shared_axes=True,
- grid=False, legend=True, rot=None, title=None,
- xlim=None, ylim=None, clim=None, xticks=None, yticks=None,
- logx=False, logy=False, loglog=False, hover=True,
+ grid=None, legend=None, rot=None, title=None,
+ xlim=None, ylim=None, clim=None,
+ logx=None, logy=None, loglog=None, hover=True,
subplots=False, label=None, invert=False,
stacked=False, colorbar=None, fontsize=None,
- colormap=None, datashade=False, rasterize=False,
+ datashade=False, rasterize=False,
row=None, col=None, figsize=None, debug=False,
- xaxis=True, yaxis=True, framewise=True, aggregator=None,
- projection=None, global_extent=False, geo=False,
- precompute=False, flip_xaxis=False, flip_yaxis=False,
+ framewise=True, aggregator=None,
+ projection=None, global_extent=None, geo=False,
+ precompute=False, flip_xaxis=None, flip_yaxis=None,
dynspread=False, hover_cols=[], x_sampling=None,
- y_sampling=None, project=False, xlabel=None, ylabel=None,
- clabel=None, xformatter=None, yformatter=None, tools=[],
- padding=None, responsive=False, min_width=None,
- min_height=None, max_height=None, max_width=None,
+ y_sampling=None, project=False, tools=[],
attr_labels=True, coastline=False, tiles=False,
sort_date=True, **kwds):
@@ -337,75 +344,64 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
# Process options
self.stacked = stacked
- style_opts, plot_opts, kwds = self._process_style(colormap, kwds)
+ self._style_opts, kwds = self._process_style(kwds)
+
+ plot_opts = {**self._default_plot_opts,
+ **self._process_plot(self._style_opts.get('color'))}
+ if xlim is not None:
+ plot_opts['xlim'] = tuple(xlim)
+ if ylim is not None:
+ plot_opts['ylim'] = tuple(ylim)
+
self.invert = invert
- plot_opts['logx'] = logx or loglog
- plot_opts['logy'] = logy or loglog
- plot_opts['show_grid'] = grid
- plot_opts['shared_axes'] = shared_axes
- plot_opts['show_legend'] = bool(legend)
+ if loglog is not None:
+ logx = logx or loglog
+ logy = logy or loglog
+ if logx is not None:
+ plot_opts['logx'] = logx
+ if logy is not None:
+ plot_opts['logy'] = logy
+
+ if grid is not None:
+ plot_opts['show_grid'] = grid
+
+ if legend is not None:
+ plot_opts['show_legend'] = bool(legend)
if legend in self._legend_positions:
plot_opts['legend_position'] = legend
- elif legend in (True, False, None):
- plot_opts['legend_position'] = 'right'
- else:
+ elif legend not in (True, False, None):
raise ValueError('The legend option should be a boolean or '
'a valid legend position (i.e. one of %s).'
% list(self._legend_positions))
- if xticks:
- plot_opts['xticks'] = xticks
- if yticks:
- plot_opts['yticks'] = yticks
- if not xaxis:
- plot_opts['xaxis'] = None
- elif xaxis != True:
- plot_opts['xaxis'] = xaxis
- if not yaxis:
- plot_opts['yaxis'] = None
- elif yaxis != True:
- plot_opts['yaxis'] = yaxis
- if xlabel is not None:
- plot_opts['xlabel'] = xlabel
- if ylabel is not None:
- plot_opts['ylabel'] = ylabel
- if clabel is not None:
- plot_opts['clabel'] = clabel
- if xlim is not None:
- plot_opts['xlim'] = tuple(xlim)
- if ylim is not None:
- plot_opts['ylim'] = tuple(ylim)
- if padding is not None:
- plot_opts['padding'] = padding
- if xformatter is not None:
- plot_opts['xformatter'] = xformatter
- if yformatter is not None:
- plot_opts['yformatter'] = yformatter
+ plotwds = ['xticks', 'yticks', 'xlabel', 'ylabel', 'clabel',
+ 'padding', 'xformatter', 'yformatter',
+ 'height', 'width',
+ 'min_width', 'min_height', 'max_width', 'max_height',
+ 'fontsize', 'responsive', 'shared_axes']
+ for plotwd in plotwds:
+ if plotwd in kwds:
+ plot_opts[plotwd] = kwds.pop(plotwd)
+
+ for axis_name in ['xaxis', 'yaxis']:
+ if axis_name in kwds:
+ axis = kwds.pop(axis_name)
+ if not axis:
+ plot_opts[axis_name] = None
+ elif axis != True:
+ plot_opts[axis_name] = axis
+ elif axis_name in plot_opts:
+ plot_opts.pop(axis_name, None)
+
if flip_xaxis:
plot_opts['invert_xaxis'] = True
if flip_yaxis:
plot_opts['invert_yaxis'] = True
- if responsive:
- if width:
- plot_opts['width'] = width
- if height:
- plot_opts['height'] = height
- else:
- plot_opts['width'] = width or 700
- plot_opts['height'] = height or 300
- if min_width is not None:
- plot_opts['min_width'] = min_width
- if min_height is not None:
- plot_opts['min_height'] = min_height
- if max_width is not None:
- plot_opts['max_width'] = max_width
- if max_height is not None:
- plot_opts['max_height'] = max_height
- if responsive:
- plot_opts['responsive'] = responsive
- if fontsize:
- plot_opts['fontsize'] = fontsize
+ if not plot_opts.get('responsive', True):
+ plot_opts['width'] = plot_opts.get('width', 700)
+ plot_opts['height'] = plot_opts.get('height', 300)
+
if isinstance(colorbar, bool):
plot_opts['colorbar'] = colorbar
elif self.kind in self._colorbar_types:
@@ -418,7 +414,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
axis = 'yrotation' if invert else 'xrotation'
plot_opts[axis] = rot
- tools = list(tools)
+ tools = list(tools) or list(plot_opts.get('tools', []))
if hover and not any(t for t in tools if isinstance(t, HoverTool)
or t == 'hover'):
tools.append('hover')
@@ -430,21 +426,12 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
plot_opts['projection'] = process_crs(projection)
if title is not None:
plot_opts['title_format'] = title
+
self._plot_opts = plot_opts
- self._overlay_opts = {k: v for k, v in plot_opts.items()
+ self._overlay_opts = {k: v for k, v in self._plot_opts.items()
if k in OverlayPlot.params()}
- options = Store.options(backend='bokeh')
- el_type = self._kind_mapping[self.kind].__name__
- if el_type in options:
- style = options[el_type].groups['style']
- cycled_opts = [k for k, v in style.kwargs.items() if isinstance(v, Cycle)]
- for opt in cycled_opts:
- color = style_opts.get('color', None)
- if color is None:
- color = process_cmap(colormap or 'Category10', categorical=True)
- style_opts[opt] = Cycle(values=color) if isinstance(color, list) else color
- self._style_opts = style_opts
- self._norm_opts = {'framewise': framewise, 'axiswise': not shared_axes}
+
+ self._norm_opts = {'framewise': framewise, 'axiswise': not plot_opts.get('shared_axes')}
self.kwds = kwds
# Process dimensions and labels
@@ -668,34 +655,51 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
'because {e}; suppress this warning '
'with attr_labels=False.'.format(e=e))
- def _process_style(self, colormap, kwds):
- plot_options = {}
+ def _process_plot(self, color):
+ kind = self.kind
+ options = Store.options(backend='bokeh')
+ elname = self._kind_mapping[kind].__name__
+ plot_opts = options[elname].groups['plot'].options if elname in options else {}
+
+ if kind.startswith('bar'):
+ plot_opts['stacked'] = self.stacked
+
+ # Color
+ if color is not None and 'colorbar' not in plot_opts:
+ if 'c' in self._kind_options.get(kind, []) and (color in self.variables):
+ if self.data[color].dtype.kind not in 'OSU':
+ plot_opts['colorbar'] = True
+
+ return plot_opts
+
+ def _process_style(self, kwds):
kind = self.kind
eltype = self._kind_mapping[kind]
- if eltype in Store.registry['bokeh']:
- valid_opts = Store.registry['bokeh'][eltype].style_opts
+ registry = Store.registry['bokeh']
+
+ if eltype in registry:
+ valid_opts = registry[eltype].style_opts
else:
valid_opts = []
for opt in valid_opts:
- if opt not in kwds or not isinstance(kwds[opt], list) or opt == 'cmap':
+ if opt not in kwds or not isinstance(kwds[opt], list) or opt in ['cmap', 'colormap']:
continue
kwds[opt] = Cycle(kwds[opt])
- style_opts = {kw: kwds[kw] for kw in list(kwds) if kw in valid_opts}
-
# Process style options
- if 'cmap' in kwds and colormap:
+ options = Store.options(backend='bokeh')
+ elname = eltype.__name__
+ style = options[elname].groups['style'].kwargs if elname in options else {}
+ style_opts = {k: v for k, v in style.items() if not isinstance(v, Cycle) and k != 'cmap'}
+ style_opts.update(**{k: v for k, v in kwds.items() if k in valid_opts})
+
+ # Color
+ if 'cmap' in kwds and 'colormap' in kwds:
raise TypeError("Only specify one of `cmap` and `colormap`.")
- elif 'cmap' in kwds:
- cmap = kwds.pop('cmap')
- else:
- cmap = colormap
- if kind.startswith('bar'):
- plot_options['stacked'] = self.stacked
+ cmap = kwds.pop('cmap', kwds.pop('colormap', None))
- # Color
if 'color' in kwds or 'c' in kwds:
color = kwds.pop('color', kwds.pop('c', None))
if isinstance(color, (np.ndarray, pd.Series)):
@@ -707,9 +711,20 @@ def _process_style(self, colormap, kwds):
style_opts['color'] = color
if 'c' in self._kind_options.get(kind, []) and (color in self.variables):
if self.data[color].dtype.kind in 'OSU':
- cmap = cmap or 'Category10'
+ cmap = cmap or self._default_cmaps['categorical']
else:
- plot_options['colorbar'] = True
+ cmap = cmap or self._default_cmaps['linear']
+
+ if cmap in self._default_cmaps:
+ cmap = self._default_cmaps[cmap]
+
+ if cmap is not None:
+ style_opts['cmap'] = cmap
+
+ color = style_opts.get('color', process_cmap(cmap or self._default_cmaps['categorical'], categorical=True))
+ for k, v in style.items():
+ if isinstance(v, Cycle):
+ style_opts[k] = Cycle(values=color) if isinstance(color, list) else color
# Size
if 'size' in kwds or 's' in kwds:
@@ -728,15 +743,7 @@ def _process_style(self, colormap, kwds):
if 'marker' in kwds and 'marker' in self._kind_options[self.kind]:
style_opts['marker'] = kwds.pop('marker')
- # Alpha
- if 'marker' in kwds:
- style_opts['alpha'] = kwds.pop('alpha')
-
- if cmap:
- style_opts['cmap'] = cmap
-
- return style_opts, plot_options, kwds
-
+ return style_opts, kwds
def _validate_kwds(self, kwds):
kind_opts = self._kind_options.get(self.kind, [])
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -123,6 +123,7 @@ def package_assets(example_path):
install_requires = [
'bokeh >=1.0.0',
+ 'colorcet',
'holoviews >=1.11.0',
'pandas'
]
| diff --git a/hvplot/tests/testdefaults.py b/hvplot/tests/testdefaults.py
deleted file mode 100644
--- a/hvplot/tests/testdefaults.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import pandas as pd
-from hvplot import hvPlot, patch
-from holoviews import Store, Scatter
-from holoviews.element.comparison import ComparisonTestCase
-
-
-class TestDefaults(ComparisonTestCase):
-
- def setUp(self):
- patch('pandas')
- self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])
-
- def test_define_default_options(self):
- hvplot = hvPlot(self.df, width=42, height=42)
- curve = hvplot(y='y')
- opts = Store.lookup_options('bokeh', curve, 'plot')
- self.assertEqual(opts.options.get('width'), 42)
- self.assertEqual(opts.options.get('height'), 42)
-
- def test_define_custom_method(self):
- hvplot = hvPlot(self.df, {'custom_scatter': {'width': 42, 'height': 42}})
- custom_scatter = hvplot.custom_scatter(y='y')
- scatter = hvplot.scatter(y='y')
- custom_opts = Store.lookup_options('bokeh', custom_scatter, 'plot')
- opts = Store.lookup_options('bokeh', scatter, 'plot')
- self.assertEqual(custom_opts.options.get('width'), 42)
- self.assertEqual(custom_opts.options.get('height'), 42)
- self.assertNotEqual(opts.options.get('width'), 42)
- self.assertNotEqual(opts.options.get('height'), 42)
-
- def test_define_customize_method(self):
- hvplot = hvPlot(self.df, {'scatter': {'width': 42, 'height': 42}})
- custom_scatter = hvplot.scatter(y='y')
- curve = hvplot.line(y='y')
- custom_opts = Store.lookup_options('bokeh', custom_scatter, 'plot')
- opts = Store.lookup_options('bokeh', curve, 'plot')
- self.assertEqual(custom_opts.options.get('width'), 42)
- self.assertEqual(custom_opts.options.get('height'), 42)
- self.assertNotEqual(opts.options.get('width'), 42)
- self.assertNotEqual(opts.options.get('height'), 42)
-
- def test_attempt_to_override_kind_on_method(self):
- hvplot = hvPlot(self.df, {'scatter': {'kind': 'line'}})
- self.assertIsInstance(hvplot.scatter(y='y'), Scatter)
diff --git a/hvplot/tests/testoptions.py b/hvplot/tests/testoptions.py
--- a/hvplot/tests/testoptions.py
+++ b/hvplot/tests/testoptions.py
@@ -1,10 +1,12 @@
-from unittest import SkipTest
+from unittest import SkipTest, expectedFailure
from parameterized import parameterized
from holoviews import Store
+from holoviews.core.options import Options, OptionTree
from holoviews.element.comparison import ComparisonTestCase
from hvplot import patch
+import holoviews as hv
class TestOptions(ComparisonTestCase):
@@ -14,10 +16,20 @@ def setUp(self):
import pandas as pd
except:
raise SkipTest('Pandas not available')
+ self.backend = 'bokeh'
+ hv.extension(self.backend)
+ Store.current_backend = self.backend
+ self.store_copy = OptionTree(sorted(Store.options().items()),
+ groups=Options._option_groups)
patch('pandas')
self.df = pd.DataFrame([[1, 2, 'A', 0.1], [3, 4, 'B', 0.2], [5, 6, 'C', 0.3]],
columns=['x', 'y', 'category', 'number'])
+ def tearDown(self):
+ Store.options(val=self.store_copy)
+ Store._custom_options = {k:{} for k in Store._custom_options.keys()}
+ super(TestOptions, self).tearDown()
+
def test_scatter_legend_position(self):
plot = self.df.hvplot.scatter('x', 'y', c='category', legend='left')
opts = Store.lookup_options('bokeh', plot, 'plot')
@@ -88,3 +100,130 @@ def test_alpha_dim_overlay(self, kind):
self.assertEqual(opts.kwargs['alpha'], 'number')
self.assertIn('number', plot.last.vdims)
+ def test_hvplot_defaults(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['show_legend'], True)
+ self.assertEqual(opts.kwargs['legend_position'], 'right')
+ self.assertEqual(opts.kwargs['show_grid'], False)
+ self.assertEqual(opts.kwargs['responsive'], False)
+ self.assertEqual(opts.kwargs['shared_axes'], True)
+ self.assertEqual(opts.kwargs['height'], 300)
+ self.assertEqual(opts.kwargs['width'], 700)
+ self.assertEqual(opts.kwargs['logx'], False)
+ self.assertEqual(opts.kwargs['logy'], False)
+ self.assertEqual(opts.kwargs.get('logz'), None)
+
+ def test_holoviews_defined_default_opts(self):
+ hv.opts.defaults(hv.opts.Scatter( height=400, width=900 ,show_grid=True))
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['legend_position'], 'right')
+ self.assertEqual(opts.kwargs['show_grid'], True)
+ self.assertEqual(opts.kwargs['height'], 400)
+ self.assertEqual(opts.kwargs['width'], 900)
+
+ def test_holoviews_defined_default_opts_overwritten_in_call(self):
+ hv.opts.defaults(hv.opts.Scatter(height=400, width=900, show_grid=True))
+ plot = self.df.hvplot.scatter('x', 'y', c='category', width=300, legend='left')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['legend_position'], 'left')
+ self.assertEqual(opts.kwargs['show_grid'], True)
+ self.assertEqual(opts.kwargs['height'], 400)
+ self.assertEqual(opts.kwargs['width'], 300)
+
+ def test_holoviews_defined_default_opts_are_not_mutable(self):
+ hv.opts.defaults(hv.opts.Scatter(tools=['tap']))
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['tools'], ['tap', 'hover'])
+ default_opts = Store.options(backend='bokeh')['Scatter'].groups['plot'].options
+ self.assertEqual(default_opts['tools'], ['tap'])
+
+ def test_axis_set_to_visible_by_default(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ assert 'xaxis' not in opts.kwargs
+ assert 'yaxis' not in opts.kwargs
+
+ def test_axis_set_to_none(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category', xaxis=None, yaxis=None)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['xaxis'], None)
+ self.assertEqual(opts.kwargs['yaxis'], None)
+
+ def test_axis_set_to_false(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category', xaxis=False, yaxis=False)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['xaxis'], None)
+ self.assertEqual(opts.kwargs['yaxis'], None)
+
+ def test_axis_set_to_none_in_holoviews_opts_default(self):
+ hv.opts.defaults(hv.opts.Scatter(xaxis=None, yaxis=None))
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['xaxis'], None)
+ self.assertEqual(opts.kwargs['yaxis'], None)
+
+ @expectedFailure
+ def test_axis_set_to_none_in_holoviews_opts_default_overwrite_in_call(self):
+ hv.opts.defaults(hv.opts.Scatter(xaxis=None, yaxis=None))
+ plot = self.df.hvplot.scatter('x', 'y', c='category', xaxis=True, yaxis=True)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ assert 'xaxis' not in opts.kwargs
+ assert 'yaxis' not in opts.kwargs
+
+ def test_loglog_opts(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category', loglog=True)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['logx'], True)
+ self.assertEqual(opts.kwargs['logy'], True)
+ self.assertEqual(opts.kwargs.get('logz'), None)
+
+ def test_logy_opts(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category', logy=True)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['logx'], False)
+ self.assertEqual(opts.kwargs['logy'], True)
+ self.assertEqual(opts.kwargs.get('logz'), None)
+
+ def test_holoviews_defined_default_opts_logx(self):
+ hv.opts.defaults(hv.opts.Scatter(logx=True))
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['logx'], True)
+ self.assertEqual(opts.kwargs['logy'], False)
+ self.assertEqual(opts.kwargs.get('logz'), None)
+
+ def test_holoviews_defined_default_opts_logx_overwritten_in_call(self):
+ hv.opts.defaults(hv.opts.Scatter(logx=True))
+ plot = self.df.hvplot.scatter('x', 'y', c='category', logx=False)
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['logx'], False)
+ self.assertEqual(opts.kwargs['logy'], False)
+ self.assertEqual(opts.kwargs.get('logz'), None)
+
+ def test_hvplot_default_cat_cmap_opts(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='category')
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], 'Category10')
+
+ def test_hvplot_default_num_cmap_opts(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='number')
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], 'kbc_r')
+
+ def test_cmap_opts_by_type(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='number', cmap='diverging')
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], 'coolwarm')
+
+ def test_cmap_opts_in_call(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='number', cmap='fire')
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], 'fire')
+
+ def test_colormap_opts_in_call(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='number', colormap='fire')
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], 'fire')
| Setting global default plot options
I would like to set default options globally for the plots generated by hvplot.
Normally in holoviews I would do this to set defaults for every plot:
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
# Set default options here
opts.defaults(opts.Curve( height=400, width=900 ,show_grid=True))
Now that I'm using hvplot, the import becomes the following for xarray-based plotting:
import xarray as xr
import hvplot.xarray
If I have an xarray dataset and I want to plot one of the dimensions then I use this
ds.myDims.hvplot()
However, this gives a plot with a default width and no grid lines. To change this I need to specify the options with each plot call:
ds.myDims.hvplot(height=400, width=900,grid=True)
Ideally I'd like to set the height, width, grid, colours, etc. globally, like I can in holoviews, so I don't have to specify them for every plot.
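To illustrate, the tests added above check that defaults declared via `hv.opts.defaults` are now respected by hvplot; a minimal sketch of that workflow (using a small synthetic DataArray and assumed element type and sizes, rather than the dataset above) might look like:
```python
import numpy as np
import xarray as xr
import holoviews as hv
import hvplot.xarray  # noqa: registers the .hvplot accessor

hv.extension('bokeh')

# Declare the defaults once, HoloViews-style; hvplot-generated elements
# should now pick these up instead of overriding them.
hv.opts.defaults(hv.opts.Image(height=400, width=900, show_grid=True))

da = xr.DataArray(np.random.rand(10, 10), dims=['y', 'x'], name='value')
plot = da.hvplot.image()  # should come out 900x400 with a grid, no per-call opts
```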
| 2019-07-23T17:03:20 |
|
holoviz/hvplot | 259 | holoviz__hvplot-259 | [
"121"
] | 5b9cfd5d209e00f892311081a1b9001ff57c8857 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -141,6 +141,8 @@ class HoloViewsConverter(object):
The width and height of the plot in pixels
attr_labels (default=True): bool
Whether to use an xarray object's attributes as labels
+ sort_date (default=True): bool
+ Whether to sort the x-axis by date before plotting
Datashader options
------------------
@@ -270,7 +272,8 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
clabel=None, xformatter=None, yformatter=None, tools=[],
padding=None, responsive=False, min_width=None,
min_height=None, max_height=None, max_width=None,
- attr_labels=True, coastline=False, tiles=False, **kwds):
+ attr_labels=True, coastline=False, tiles=False,
+ sort_date=True, **kwds):
# Process data and related options
self._redim = fields
@@ -288,6 +291,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
self.tiles = tiles
self.row = row
self.col = col
+ self.sort_date = sort_date
# Import geoviews if geo-features requested
if self.geo or self.datatype == 'geopandas':
@@ -1006,6 +1010,14 @@ def _process_args(self, data, x, y):
elif not x:
raise ValueError('Could not determine what to plot. Expected '
'x to be declared or use_index to be enabled.')
+ if self.sort_date and self.datatype == 'pandas':
+ from pandas.api.types import is_datetime64_any_dtype as is_datetime
+ if x in self.indexes:
+ index = self.indexes.index(x)
+ if is_datetime(data.axes[index]):
+ data = data.sort_index(axis=self.indexes.index(x))
+ elif is_datetime(data[x]):
+ data = data.sort_values(x)
y = y or self.y
if not y:
| diff --git a/hvplot/tests/testcharts.py b/hvplot/tests/testcharts.py
--- a/hvplot/tests/testcharts.py
+++ b/hvplot/tests/testcharts.py
@@ -52,6 +52,10 @@ def setUp(self):
columns=['x', 'y', 'category'])
self.cat_only_df = pd.DataFrame([['A', 'a'], ['B', 'b'], ['C', 'c']],
columns=['upper', 'lower'])
+ self.time_df = pd.DataFrame({
+ 'time': pd.date_range('1/1/2000', periods=10, tz='UTC'),
+ 'A': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ 'B': 'abcdefghij'})
@parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
def test_wide_chart(self, kind, element):
@@ -153,6 +157,33 @@ def test_includes_str_if_no_num_chart(self, kind, element):
}, 'Variable')
self.assertEqual(plot, obj)
+ def test_time_df_sorts_on_plot(self):
+ scrambled = self.time_df.sample(frac=1)
+ plot = scrambled.hvplot(x='time')
+ assert (plot.data == self.time_df).all().all()
+ assert (plot.data.time.diff()[1:].astype('int') > 0).all()
+
+ def test_time_df_does_not_sort_on_plot_if_sort_date_off(self):
+ scrambled = self.time_df.sample(frac=1)
+ plot = scrambled.hvplot(x='time', sort_date=False)
+ assert (plot.data == scrambled).all().all()
+ assert not (plot.data.time.diff()[1:].astype('int') > 0).all()
+
+ def test_time_df_sorts_on_plot_using_index_as_x(self):
+ df = self.time_df.set_index('time')
+ scrambled = df.sample(frac=1)
+ plot = scrambled.hvplot()
+ assert (plot.data['time'] == df.index).all()
+ assert (plot.data.time.diff()[1:].astype('int') > 0).all()
+
+ def test_time_df_does_not_sort_on_plot_if_sort_date_off_using_index_as_x(self):
+ df = self.time_df.set_index('time')
+ scrambled = df.sample(frac=1)
+ plot = scrambled.hvplot(sort_date=False)
+ assert (plot.data.time == scrambled.index).all().all()
+ assert not (plot.data.time.diff()[1:].astype('int') > 0).all()
+
+
class TestChart1DDask(TestChart1D):
def setUp(self):
| hvPlot does not sort dates automatically like pandas.plot()
Simple setup
```
import numpy as np
import pandas as pd
from random import shuffle  # needed for shuffle(index) below
index = pd.date_range('1/1/2000', periods=1000)
index = [d.strftime("%a, %d %b %Y %H:%M:%S +0000") for d in index]
df = pd.DataFrame(np.random.randn(1000, 4), columns=list('ABCD')).cumsum()
shuffle(index)
df['date'] = index
df['date'] = df["date"].apply(pd.to_datetime)
df.head()
```
With pandas plot()
```
%matplotlib inline
df.plot(x='date')
```

```
import hvplot.pandas
df.hvplot(x='date')
```

The hvplot graph comes out sorted if no datetime type is applied to the **date** column (just comment out that line to see the result).
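For reference, the patch above adds a `sort_date` option (default True); a small sketch of the resulting behaviour, with illustrative data:
```python
import numpy as np
import pandas as pd
import hvplot.pandas  # noqa: registers the .hvplot accessor

idx = pd.date_range('1/1/2000', periods=100)
df = pd.DataFrame(np.random.randn(100, 2).cumsum(axis=0),
                  columns=list('AB'), index=idx).sample(frac=1)  # scrambled row order
df = df.reset_index().rename(columns={'index': 'date'})

df.hvplot(x='date')                   # sorted by date before plotting (new default)
df.hvplot(x='date', sort_date=False)  # keep the original (scrambled) row order
```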
| 2019-07-24T17:28:26 |
|
holoviz/hvplot | 262 | holoviz__hvplot-262 | [
"261"
] | 7e9ab73922ac68601c5fbb10476c16fbed29e8c9 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -139,8 +139,11 @@ class HoloViewsConverter(object):
ticks positions, or list of tuples of the tick positions and labels
width (default=800)/height (default=300): int
The width and height of the plot in pixels
- attr_labels (default=True): bool
- Whether to use an xarray object's attributes as labels
+ attr_labels (default=None): bool
+ Whether to use an xarray object's attributes as labels, defaults to
+ None to allow best effort without throwing a warning. Set to True
+ to see warning if the attrs can't be found, set to False to disable
+ the behavior.
sort_date (default=True): bool
Whether to sort the x-axis by date before plotting
@@ -279,7 +282,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
precompute=False, flip_xaxis=None, flip_yaxis=None,
dynspread=False, hover_cols=[], x_sampling=None,
y_sampling=None, project=False, tools=[],
- attr_labels=True, coastline=False, tiles=False,
+ attr_labels=None, coastline=False, tiles=False,
sort_date=True, **kwds):
# Process data and related options
@@ -635,10 +638,13 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
self.streaming = streaming
self.hover_cols = hover_cols
- if da is not None and attr_labels:
+ if da is not None and attr_labels is True or attr_labels is None:
try:
var_tuples = [(var, da[var].attrs) for var in da.coords]
- var_tuples.append((da.name, da.attrs))
+ if isinstance(da, xr.Dataset):
+ var_tuples.extend([(var, da[var].attrs) for var in da.data_vars])
+ else:
+ var_tuples.append((da.name, da.attrs))
labels = {}
units = {}
for var_name, var_attrs in var_tuples:
@@ -651,9 +657,9 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
self._redim = self._merge_redim(labels, 'label')
self._redim = self._merge_redim(units, 'unit')
except Exception as e:
- param.main.warning('Unable to auto label using xarray attrs '
- 'because {e}; suppress this warning '
- 'with attr_labels=False.'.format(e=e))
+ if attr_labels is True:
+ param.main.warning('Unable to auto label using xarray attrs '
+ 'because {e}'.format(e=e))
def _process_plot(self, color):
kind = self.kind
| diff --git a/hvplot/tests/testgridplots.py b/hvplot/tests/testgridplots.py
--- a/hvplot/tests/testgridplots.py
+++ b/hvplot/tests/testgridplots.py
@@ -34,6 +34,8 @@ def setUp(self):
self.xarr_with_attrs.x.attrs['long_name'] = 'Declination'
self.xarr_with_attrs.y.attrs['long_name'] = 'Right Ascension'
+ self.xds_with_attrs = xr.Dataset({'light': self.xarr_with_attrs })
+
def test_rgb_dataarray_no_args(self):
rgb = self.da_rgb.hvplot()
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
@@ -88,6 +90,13 @@ def test_img_infer_dimension_params_from_xarray_attrs(self):
self.assertEqual(img.vdims[0].unit, 'lm')
self.assertEqual(img.vdims[0].range, (0, 2))
+ def test_table_infer_dimension_params_from_xarray_ds_attrs(self):
+ table = self.xds_with_attrs.hvplot.dataset()
+ self.assertEqual(table.kdims[0].label, 'Declination')
+ self.assertEqual(table.kdims[1].label, 'Right Ascension')
+ self.assertEqual(table.kdims[2].label, 'luminosity')
+ self.assertEqual(table.kdims[2].unit, 'lm')
+
def test_points_infer_dimension_params_from_xarray_attrs(self):
points = self.xarr_with_attrs.hvplot.points(c='value', clim=(0, 2))
self.assertEqual(points.kdims[0].label, 'Declination')
@@ -95,14 +104,14 @@ def test_points_infer_dimension_params_from_xarray_attrs(self):
self.assertEqual(points.vdims[0].label, 'luminosity')
self.assertEqual(points.vdims[0].unit, 'lm')
self.assertEqual(points.vdims[0].range, (0, 2))
-
+
def test_dataset_infer_dimension_params_from_xarray_attrs(self):
ds = self.xarr_with_attrs.hvplot.dataset()
self.assertEqual(ds.kdims[0].label, 'Declination')
self.assertEqual(ds.kdims[1].label, 'Right Ascension')
self.assertEqual(ds.kdims[2].label, 'luminosity')
self.assertEqual(ds.kdims[2].unit, 'lm')
-
+
def test_table_infer_dimension_params_from_xarray_attrs(self):
table = self.xarr_with_attrs.hvplot.dataset()
self.assertEqual(table.kdims[0].label, 'Declination')
| Fix warning from #173
In #173 a new warning was introduced that is very loud and assumes that the objects are DataArrays.
```python
ds = xr.tutorial.open_dataset('air_temperature')
ds.hvplot(groupby='time')
ds.mean(dim=['lat', 'lon']).hvplot()
```
<img width="1087" alt="Screen Shot 2019-07-25 at 4 01 57 PM" src="https://user-images.githubusercontent.com/4806877/61904810-95812480-aef5-11e9-9a66-b271f8c72505.png">
<img width="1107" alt="Screen Shot 2019-07-25 at 3 56 16 PM" src="https://user-images.githubusercontent.com/4806877/61904820-9e71f600-aef5-11e9-9b35-7bc5084224a2.png">
| 2019-07-25T20:43:41 |
|
holoviz/hvplot | 263 | holoviz__hvplot-263 | [
"54"
] | 1a012953e311e1445f214c9ecb19bc79c5766c4c | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -29,7 +29,8 @@
from .util import (
is_series, is_dask, is_intake, is_streamz, is_xarray, process_crs,
- process_intake, process_xarray, check_library, is_geopandas
+ process_intake, process_xarray, check_library, is_geopandas,
+ process_derived_datetime,
)
renderer = hv.renderer('bokeh')
@@ -574,6 +575,7 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
if gridded_data:
not_found = [g for g in groupby if g not in data.coords]
+ not_found, *_ = process_derived_datetime(data, not_found)
data_vars = list(data.data_vars) if isinstance(data, xr.Dataset) else [data.name]
indexes = list(data.coords)
self.variables = list(data.coords) + data_vars
diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -226,6 +226,12 @@ def is_xarray(data):
from xarray import DataArray, Dataset
return isinstance(data, (DataArray, Dataset))
+def is_xarray_groupby(data):
+ if not check_library(data, 'xarray'):
+ return False
+ from xarray.core.groupby import DataArrayGroupBy, DatasetGroupBy
+ return isinstance(data, (DataArrayGroupBy, DatasetGroupBy))
+
def process_intake(data, use_dask):
if data.container not in ('dataframe', 'xarray'):
@@ -251,6 +257,19 @@ def process_xarray(data, x, y, by, groupby, use_dask, persist, gridded, label, v
name = data.name or label or value_label
dataset = data.to_dataset(name=name)
+ all_vars = list(other_dims) if other_dims else []
+ for var in [x, y, by, groupby]:
+ if isinstance(var, list):
+ all_vars.extend(var)
+ elif isinstance(var, str):
+ all_vars.append(var)
+
+ if not gridded:
+ not_found = [var for var in all_vars if var not in list(dataset.data_vars) + list(dataset.coords)]
+ _, extra_vars, extra_coords = process_derived_datetime(dataset, not_found)
+ dataset = dataset.assign_coords(**{var: dataset[var] for var in extra_coords})
+ dataset = dataset.assign(**{var: dataset[var] for var in extra_vars})
+
data_vars = list(dataset.data_vars)
ignore = (by or []) + (groupby or [])
dims = [c for c in dataset.coords if dataset[c].shape != () and c not in ignore][::-1]
@@ -293,9 +312,36 @@ def process_xarray(data, x, y, by, groupby, use_dask, persist, gridded, label, v
elif not x and not y:
x, y = dims[0], data_vars
+ for var in [x, y]:
+ if isinstance(var, list):
+ all_vars.extend(var)
+ elif isinstance(var, str):
+ all_vars.append(var)
+
+ covered_dims = []
+ for var in all_vars:
+ if var in dataset.coords:
+ covered_dims.extend(dataset[var].dims)
+ leftover_dims = [dim for dim in dims if dim not in covered_dims + all_vars]
+
if by is None:
- by = [c for c in dims if c not in (x, y)]
- if len(by) > 1: by = []
+ by = leftover_dims if len(leftover_dims) == 1 else []
if groupby is None:
- groupby = [c for c in dims if c not in by+[x, y]]
+ groupby = [c for c in leftover_dims if c not in by]
return data, x, y, by, groupby
+
+
+def process_derived_datetime(data, not_found):
+ from pandas.api.types import is_datetime64_any_dtype as isdate
+ extra_vars = []
+ extra_coords = []
+ for var in not_found:
+ if '.' in var:
+ derived_from = var.split('.')[0]
+ if isdate(data[derived_from]):
+ if derived_from in data.coords:
+ extra_coords.append(var)
+ else:
+ extra_vars.append(var)
+ not_found = [var for var in not_found if var not in extra_vars + extra_coords]
+ return not_found, extra_vars, extra_coords
| diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -41,8 +41,8 @@ def test_process_1d_xarray_dataarray_with_no_coords(self):
assert isinstance(data, pd.DataFrame)
assert x == 'index'
assert y == ['value']
- assert by == []
- assert groupby == []
+ assert not by
+ assert not groupby
def test_process_1d_xarray_dataarray_with_coords(self):
import xarray as xr
@@ -57,8 +57,8 @@ def test_process_1d_xarray_dataarray_with_coords(self):
assert isinstance(data, pd.DataFrame)
assert x == 'day'
assert y == ['value']
- assert by == []
- assert groupby == []
+ assert not by
+ assert not groupby
def test_process_1d_xarray_dataarray_with_coords_and_name(self):
import xarray as xr
@@ -74,8 +74,8 @@ def test_process_1d_xarray_dataarray_with_coords_and_name(self):
assert isinstance(data, pd.DataFrame)
assert x == 'day'
assert y == ['temp']
- assert by == []
- assert groupby == []
+ assert not by
+ assert not groupby
def test_process_2d_xarray_dataarray_with_no_coords(self):
import xarray as xr
@@ -87,8 +87,8 @@ def test_process_2d_xarray_dataarray_with_no_coords(self):
assert isinstance(data, pd.DataFrame)
assert x == 'index'
assert y == ['value']
- assert by == []
- assert groupby == []
+ assert not by
+ assert not groupby
def test_process_2d_xarray_dataarray_with_no_coords_as_gridded(self):
import xarray as xr
@@ -103,8 +103,8 @@ def test_process_2d_xarray_dataarray_with_no_coords_as_gridded(self):
assert list(data.data_vars.keys()) == ['value']
assert x == 'dim_1'
assert y == 'dim_0'
- assert by is None
- assert groupby is None
+ assert not by
+ assert not groupby
def test_process_2d_xarray_dataarray_with_coords_as_gridded(self):
import xarray as xr
@@ -122,8 +122,8 @@ def test_process_2d_xarray_dataarray_with_coords_as_gridded(self):
assert list(data.data_vars.keys()) == ['value']
assert x == 'y'
assert y == 'x'
- assert by is None
- assert groupby is None
+ assert not by
+ assert not groupby
def test_process_3d_xarray_dataset_with_coords(self):
import pandas as pd
@@ -132,7 +132,7 @@ def test_process_3d_xarray_dataset_with_coords(self):
assert isinstance(data, pd.DataFrame)
assert x == 'time'
assert y == ['air']
- assert by == []
+ assert not by
assert groupby == ['lon', 'lat']
def test_process_3d_xarray_dataset_with_coords_as_gridded(self):
@@ -160,5 +160,33 @@ def test_process_3d_xarray_dataset_with_coords_as_gridded_uses_axis_to_get_defau
assert list(data.data_vars.keys()) == ['air']
assert x == 'lon'
assert y == 'lat'
- assert by is None
+ assert not by
assert groupby == ['time']
+
+ def test_process_xarray_dataset_with_by_as_derived_datetime(self):
+ import pandas as pd
+
+ data = self.ds.mean(dim=['lat', 'lon'])
+ kwargs = self.default_kwargs
+ kwargs.update(gridded=False, y='air', by=['time.hour'])
+
+ data, x, y, by, groupby = process_xarray(data=data, **kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'time'
+ assert y == 'air'
+ assert by == ['time.hour']
+ assert not groupby
+
+ def test_process_xarray_dataset_with_x_as_derived_datetime(self):
+ import pandas as pd
+
+ data = self.ds.mean(dim=['lat', 'lon'])
+ kwargs = self.default_kwargs
+ kwargs.update(gridded=False, y='air', x='time.dayofyear')
+
+ data, x, y, by, groupby = process_xarray(data=data, **kwargs)
+ assert isinstance(data, pd.DataFrame)
+ assert x == 'time.dayofyear'
+ assert y == 'air'
+ assert not by
+ assert not groupby
| Derived datetime accessor (time.dayofyear)
Something like xarray's datetime accessor:

So that instead of
```
df['day'] = df.index.day
df.dropna(subset=['tmpf']).hvplot('valid', 'tmpf', groupby='day')
```
one can...
`df.dropna(subset=['tmpf']).hvplot('valid', 'tmpf', groupby='valid.day')`
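For the xarray side, a rough sketch of the usage exercised by the new tests above (the tutorial dataset assumes network access, and the column names mirror those tests):
```python
import xarray as xr
import hvplot.xarray  # noqa: registers the .hvplot accessor

# collapse the spatial dims so only the time dimension remains
ds = xr.tutorial.open_dataset('air_temperature').mean(dim=['lat', 'lon'])

ds.hvplot(y='air', by='time.hour')       # colour by hour of day
ds.hvplot(x='time.dayofyear', y='air')   # use day-of-year on the x-axis
```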
| Maybe we could add a `.hvplot` method on `groupby` objects themselves so that we can support any type of grouped output. | 2019-07-26T15:27:24 |
holoviz/hvplot | 266 | holoviz__hvplot-266 | [
"265"
] | 62fb08b2801e6e7d4d26a6b3359a3dc919e264ee | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -723,7 +723,7 @@ def _process_style(self, kwds):
else:
cmap = cmap or self._default_cmaps['linear']
- if cmap in self._default_cmaps:
+ if isinstance(cmap, str) and cmap in self._default_cmaps:
cmap = self._default_cmaps[cmap]
if cmap is not None:
| diff --git a/hvplot/tests/testoptions.py b/hvplot/tests/testoptions.py
--- a/hvplot/tests/testoptions.py
+++ b/hvplot/tests/testoptions.py
@@ -218,12 +218,17 @@ def test_cmap_opts_by_type(self):
opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(opts.kwargs['cmap'], 'coolwarm')
- def test_cmap_opts_in_call(self):
+ def test_cmap_opts_by_name(self):
plot = self.df.hvplot.scatter('x', 'y', c='number', cmap='fire')
opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(opts.kwargs['cmap'], 'fire')
- def test_colormap_opts_in_call(self):
+ def test_colormap_opts_by_name(self):
plot = self.df.hvplot.scatter('x', 'y', c='number', colormap='fire')
opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(opts.kwargs['cmap'], 'fire')
+
+ def test_cmap_opts_as_a_list(self):
+ plot = self.df.hvplot.scatter('x', 'y', c='number', cmap=['red', 'blue', 'green'])
+ opts = Store.lookup_options('bokeh', plot, 'style')
+ self.assertEqual(opts.kwargs['cmap'], ['red', 'blue', 'green'])
| hvplot(cmap=[]) doesn't accept lists but hvplot().opts(cmap) does
Doesn't work
```
import xarray as xr
import hvplot.xarray
da = xr.tutorial.open_dataset('air_temperature').isel(time=0)['air']
da.hvplot('lon', 'lat', cmap=['red', 'blue', 'green'])
```
Works
```
import xarray as xr
import hvplot.xarray
da = xr.tutorial.open_dataset('air_temperature').isel(time=0)['air']
da.hvplot('lon', 'lat').opts(cmap=['red', 'blue', 'green'])
```
| Do you know if this used to work? I have been messing around with the cmap kwarg a bit. | 2019-07-29T13:26:10 |
holoviz/hvplot | 269 | holoviz__hvplot-269 | [
"267"
] | c092179e9c0948f0da1ee554d8af79c9261f045c | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -740,7 +740,10 @@ def _process_style(self, kwds):
if cmap is not None:
style_opts['cmap'] = cmap
- color = style_opts.get('color', process_cmap(cmap or self._default_cmaps['categorical'], categorical=True))
+ if not isinstance(cmap, dict):
+ color = style_opts.get('color', process_cmap(cmap or self._default_cmaps['categorical'], categorical=True))
+ else:
+ color = style_opts.get('color')
for k, v in style.items():
if isinstance(v, Cycle):
style_opts[k] = Cycle(values=color) if isinstance(color, list) else color
| diff --git a/hvplot/tests/testoperations.py b/hvplot/tests/testoperations.py
--- a/hvplot/tests/testoperations.py
+++ b/hvplot/tests/testoperations.py
@@ -75,6 +75,10 @@ def test_aspect_and_frame_height_with_datashade_and_dynamic_is_false(self, opt):
self.assertEqual(opts.get('height'), None)
self.assertEqual(opts.get('frame_width'), None)
+ def test_cmap_can_be_color_key(self):
+ color_key = {'A': '#ff0000', 'B': '#00ff00', 'C': '#0000ff'}
+ self.df.hvplot.points(x='x', y='y', by='category', cmap=color_key, datashade=True)
+
class TestChart2D(ComparisonTestCase):
| Loading color_key while using hvplot.Scatter with datashader=True
Hi,
Is there any way to pass a custom color_key to datashader when calling hvplot.Scatter with datashader=True?
I need to include a legend for the scatter points colors. I'm using the recommended workaround and know how to do this while calling datashade(....color_key=colorDict). I'm looking for a way to do it while using hvplot.
Thanks.
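For reference, the change and test above suggest that `cmap` can now take a colour-key dictionary directly; a hedged sketch of that usage with synthetic data:
```python
import numpy as np
import pandas as pd
import hvplot.pandas  # noqa: registers the .hvplot accessor

df = pd.DataFrame({'x': np.random.rand(300),
                   'y': np.random.rand(300),
                   'category': np.random.choice(list('ABC'), 300)})

# per-category colour key, passed straight through as cmap
color_key = {'A': '#ff0000', 'B': '#00ff00', 'C': '#0000ff'}
df.hvplot.points(x='x', y='y', by='category', cmap=color_key, datashade=True)
```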
| I'm not sure what the question is, exactly. Once you provide the color_key as above, you can overlay the corresponding color_points object from http://holoviews.org/user_guide/Large_Data.html in the same way for hvPlot as for HoloViews; hvPlot returns a HoloViews object that will work the same as if you had created it with HoloViews. | 2019-08-01T20:46:46 |
holoviz/hvplot | 273 | holoviz__hvplot-273 | [
"204"
] | 8b8b2e6b96d01728f9720e5f51da4a3e6ec67c36 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -85,8 +85,9 @@ class HoloViewsConverter(object):
Whether to flip the axis left to right or up and down respectively
grid (default=False): boolean
Whether to show a grid
- hover (default=True): boolean
- Whether to show hover tooltips
+ hover : boolean
+ Whether to show hover tooltips, default is True unless datashade is
+ True in which case hover is False by default
hover_cols (default=[]): list
Additional columns to add to the hover tool
invert (default=False): boolean
@@ -279,7 +280,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
crs=None, fields={}, groupby=None, dynamic=True,
grid=None, legend=None, rot=None, title=None,
xlim=None, ylim=None, clim=None,
- logx=None, logy=None, loglog=None, hover=True,
+ logx=None, logy=None, loglog=None, hover=None,
subplots=False, label=None, invert=False,
stacked=False, colorbar=None, fontsize=None,
datashade=False, rasterize=False,
@@ -430,6 +431,8 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
plot_opts[axis] = rot
tools = list(tools) or list(plot_opts.get('tools', []))
+ if hover is None:
+ hover = not self.datashade
if hover and not any(t for t in tools if isinstance(t, HoverTool)
or t == 'hover'):
tools.append('hover')
| diff --git a/hvplot/tests/testoperations.py b/hvplot/tests/testoperations.py
--- a/hvplot/tests/testoperations.py
+++ b/hvplot/tests/testoperations.py
@@ -79,6 +79,16 @@ def test_cmap_can_be_color_key(self):
color_key = {'A': '#ff0000', 'B': '#00ff00', 'C': '#0000ff'}
self.df.hvplot.points(x='x', y='y', by='category', cmap=color_key, datashade=True)
+ def test_when_datashade_is_true_set_hover_to_false_by_default(self):
+ plot = self.df.hvplot(x='x', y='y', datashade=True)
+ opts = Store.lookup_options('bokeh', plot[0], 'plot').kwargs
+ assert 'hover' not in opts.get('tools')
+
+ def test_when_datashade_is_true_hover_can_still_be_true(self):
+ plot = self.df.hvplot(x='x', y='y', datashade=True, hover=True)
+ opts = Store.lookup_options('bokeh', plot[0], 'plot').kwargs
+ assert 'hover' in opts.get('tools')
+
class TestChart2D(ComparisonTestCase):
| hover broken for datashade

```
import pandas as pd
import hvplot.pandas
tst = pd.DataFrame([0, 1, 2])
tst.hvplot('index', 0, datashade=True)
```
| Yeah, we should suppress hover for datashade output by default since it's not useful and fix the underlying issue in holoviews.
The RGBA value is now displayed, but as an int, which is useless, so we should disable it.
I'd prefer to display it as hex or a four-tuple, rather than omit it, but I don't know how difficult that is.
That's a holoviews issue; the issue here is that hvplot displays hover info by default, but I don't think it should do so for RGB by default, even if it were shown as hex.
I think suppressing hover for datashade, or at least disabling it by default will close #271 | 2019-08-05T14:29:03 |
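With the change above, hover is now off by default for datashaded output but can still be requested explicitly; a minimal sketch reusing the snippet from the issue:
```python
import pandas as pd
import hvplot.pandas  # noqa: registers the .hvplot accessor

tst = pd.DataFrame([0, 1, 2])

tst.hvplot('index', 0, datashade=True)              # hover tool now omitted by default
tst.hvplot('index', 0, datashade=True, hover=True)  # opt back in explicitly
```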
holoviz/hvplot | 281 | holoviz__hvplot-281 | [
"283"
] | c8d88238e282c108705edaed6be73bb4a716efd7 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -357,10 +357,9 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
# Process options
self.stacked = stacked
- self._style_opts, kwds = self._process_style(kwds)
plot_opts = {**self._default_plot_opts,
- **self._process_plot(self._style_opts.get('color'))}
+ **self._process_plot()}
if xlim is not None:
plot_opts['xlim'] = tuple(xlim)
if ylim is not None:
@@ -397,6 +396,8 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
if plotwd in kwds:
plot_opts[plotwd] = kwds.pop(plotwd)
+ self._style_opts, plot_opts, kwds = self._process_style(kwds, plot_opts)
+
for axis_name in ['xaxis', 'yaxis']:
if axis_name in kwds:
axis = kwds.pop(axis_name)
@@ -692,7 +693,7 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
param.main.warning('Unable to auto label using xarray attrs '
'because {e}'.format(e=e))
- def _process_plot(self, color):
+ def _process_plot(self):
kind = self.kind
options = Store.options(backend='bokeh')
elname = self._kind_mapping[kind].__name__
@@ -701,15 +702,9 @@ def _process_plot(self, color):
if kind.startswith('bar'):
plot_opts['stacked'] = self.stacked
- # Color
- if color is not None and 'colorbar' not in plot_opts:
- if 'c' in self._kind_options.get(kind, []) and (color in self.variables):
- if self.data[color].dtype.kind not in 'OSU':
- plot_opts['colorbar'] = True
-
return plot_opts
- def _process_style(self, kwds):
+ def _process_style(self, kwds, plot_opts):
kind = self.kind
eltype = self._kind_mapping[kind]
registry = Store.registry['bokeh']
@@ -756,6 +751,7 @@ def _process_style(self, kwds):
cmap = cmap or self._default_cmaps['categorical']
else:
cmap = cmap or self._default_cmaps['linear']
+ plot_opts['colorbar'] = plot_opts.get('colorbar', True)
if isinstance(cmap, str) and cmap in self._default_cmaps:
cmap = self._default_cmaps[cmap]
@@ -798,7 +794,7 @@ def _process_style(self, kwds):
if 'marker' in kwds and 'marker' in self._kind_options[self.kind]:
style_opts['marker'] = kwds.pop('marker')
- return style_opts, kwds
+ return style_opts, plot_opts, kwds
def _validate_kwds(self, kwds):
kind_opts = self._kind_options.get(self.kind, [])
@@ -1338,7 +1334,7 @@ def kde(self, x, y, data=None):
dists = NdOverlay({0: Area([], self.value_label, vdim)},
[self.group_label])
redim = self._merge_redim(ranges)
- return (dists.redim(redim).relabel(**self._relabel).opts(opts))
+ return (dists.redim(**redim).relabel(**self._relabel).opts(opts))
##########################
# Other charts #
| diff --git a/etc/test_notebooks.py b/etc/test_notebooks.py
deleted file mode 100644
--- a/etc/test_notebooks.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Temporary script to allow checking notebooks run without errors, or
-# to approximately lint check notebooks.
-#
-# Note: lint checking will not yet work on windows unless sed is
-# present or we replace sed with python equivalent.
-#
-# Run all notebooks & render to html:
-# python examples/test_notebooks.py
-#
-# Approximately lint check all notebooks:
-# python examples/test_notebooks.py lint
-
-from __future__ import print_function
-
-import sys
-import os
-import glob
-import pprint
-
-############################################################
-# Set nbdir, run_skip, and run_allow_error for your project.
-# You may need to increase run_cell_timeout if you have
-# notebook cells that take a long time to execute.
-
-nbdir = "examples"
-
-run_skip = []
-
-run_allow_error = []
-
-run_cell_timeout = 360
-
-############################################################
-
-
-notebooks = sorted([x.replace(os.path.sep,"/") for x in glob.glob(nbdir+"/*.ipynb")])
-
-checked = []
-errored = []
-run_skipped = []
-
-if len(sys.argv) == 1:
- do_what = "run"
-elif sys.argv[1] == "lint":
- do_what = "lint"
-else:
- raise
-
-if do_what=="run":
- for nb in notebooks:
- cmd = "jupyter nbconvert %s --execute --ExecutePreprocessor.kernel_name=python%s --ExecutePreprocessor.timeout=%s --to html"%(nb,sys.version_info[0],run_cell_timeout)
- if nb in run_skip:
- run_skipped.append(nb)
- continue
-
- if nb in run_allow_error:
- cmd += " --allow-errors"
- print(cmd)
- r = os.system(cmd)
- checked.append(nb)
- if r!=0:
- errored.append(nb)
-
-elif sys.argv[1]=='lint':
- for nb in notebooks:
- cmd = """sed -e 's/%/#%/' {f} > {f}~ && jupyter nbconvert {f}~ --to python --PythonExporter.file_extension=.py~ && flake8 --ignore=E,W {p}""".format(f=nb,p=nb[0:-5]+'py~')
- print(cmd)
- r = os.system(cmd)
- checked.append(nb)
- if r!=0:
- errored.append(nb)
-else:
- raise
-
-print("%s checked"%len(checked))
-if len(checked)>0: pprint.pprint(checked)
-print()
-print("%s error(s)"%len(errored))
-if len(errored)>0: pprint.pprint(errored)
-print()
-
-if do_what == 'run':
- print("%s skipped"%len(run_skipped))
- if len(run_skipped)>0: pprint.pprint(run_skipped)
- print()
- if len(run_allow_error) > 0:
- print("Note: the following notebooks were not checked for run errors:")
- pprint.pprint(run_allow_error)
-
-sys.exit(len(errored))
| Incorrect param warning on bar plot + width
There is a new param warning that feels incorrect:
`WARNING:param.BarPlot02810: BarPlot width option is deprecated use 'bar_width' instead.`
<img width="1010" alt="Screen Shot 2019-08-08 at 9 57 27 AM" src="https://user-images.githubusercontent.com/4806877/62709258-0418b480-b9c3-11e9-985d-67eebb658669.png">
| 2019-08-07T20:40:49 |
|
holoviz/hvplot | 313 | holoviz__hvplot-313 | [
"312"
] | 17d9813049a150013d98654e835008c766128450 | diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -31,10 +31,11 @@ def check_crs(crs):
--------
>>> p = check_crs('+units=m +init=epsg:26915')
>>> p.srs
- '+units=m +init=epsg:26915 '
+ '+proj=utm +zone=15 +datum=NAD83 +units=m +no_defs'
>>> p = check_crs('wrong')
>>> p is None
True
+
Returns
-------
A valid crs if possible, otherwise None
@@ -79,7 +80,10 @@ def proj_to_cartopy(proj):
proj = check_crs(proj)
- if proj.is_latlong():
+ if hasattr(proj, 'crs'):
+ if proj.crs.is_geographic:
+ return ccrs.PlateCarree()
+ elif proj.is_latlong(): # pyproj<2.0
return ccrs.PlateCarree()
srs = proj.srs
| diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -190,3 +190,20 @@ def test_process_xarray_dataset_with_x_as_derived_datetime(self):
assert y == 'air'
assert not by
assert not groupby
+
+
+class TestGeoUtil(TestCase):
+
+ def setUp(self):
+ try:
+ import geoviews # noqa
+ import cartopy.crs as ccrs
+ except:
+ raise SkipTest('geoviews or cartopy not available')
+ self.ccrs = ccrs
+
+ def test_proj_to_cartopy(self):
+ from ..util import proj_to_cartopy
+ crs = proj_to_cartopy('+init=epsg:26911')
+
+ assert isinstance(crs, self.ccrs.CRS)
| specifying crs as proj4 string
I can specify the crs as an EPSG code, but with my current software configuration passing the crs as a proj4 string is failing. Is this a bug or user error?
When I feed `+init=epsg:26911` as the `crs` parameter, I get:
```
AttributeError: 'Proj' object has no attribute 'is_latlong'
ValueError: Could not parse EPSG code as CRS, must be of the format 'proj4: {proj4 string}.'
ValueError: '+init=epsg:26911' must be either a valid crs or an reference to a `data.attr` containing a valid crs.
```
Cells [14,15] in this reproducible notebook demonstrate the issue:
https://nbviewer.jupyter.org/gist/rsignell-usgs/f28adc0e80fc3c2b29444ff6a26f21d4
Software versions:
```
# Name Version Build Channel
bokeh 1.3.4 py37_0 conda-forge
datashader 0.7.0 py_1 conda-forge
holoviews 1.12.1 py_2 conda-forge
hvplot 0.4.0 py_1 conda-forge
geoviews 1.6.3 py_0 conda-forge
geoviews-core 1.6.3 py_0 conda-forge
cartopy 0.17.0 py37he1be148_1005 conda-forge
```
Full traceback:
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/util.py in process_crs(crs)
175 try:
--> 176 crs = proj_to_cartopy(crs)
177 except:
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/util.py in proj_to_cartopy(proj)
81
---> 82 if proj.is_latlong():
83 return ccrs.PlateCarree()
AttributeError: 'Proj' object has no attribute 'is_latlong'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/converter.py in _process_crs(self, data, crs)
395 try:
--> 396 return process_crs(_crs)
397 except ValueError:
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/util.py in process_crs(crs)
177 except:
--> 178 raise ValueError("Could not parse EPSG code as CRS, must be of the format 'proj4: {proj4 string}.'")
179 elif not isinstance(crs, ccrs.CRS):
ValueError: Could not parse EPSG code as CRS, must be of the format 'proj4: {proj4 string}.'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-15-235f34bf5baa> in <module>
----> 1 gv.tile_sources.OSM * da.hvplot.quadmesh(rasterize=True, crs=crs, colormap='viridis').opts(alpha=0.7)
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/__init__.py in quadmesh(self, x, y, z, colorbar, **kwds)
577 The HoloViews representation of the plot.
578 """
--> 579 return self(x, y, z=z, kind='quadmesh', colorbar=colorbar, **kwds)
580
581 def contour(self, x=None, y=None, z=None, colorbar=True, **kwds):
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/__init__.py in __call__(self, x, y, kind, **kwds)
181 HoloViews object: Object representing the requested visualization
182 """
--> 183 return self._get_converter(x, y, kind, **kwds)(kind, x, y)
184
185 def _get_converter(self, x=None, y=None, kind=None, **kwds):
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/__init__.py in _get_converter(self, x, y, kind, **kwds)
189 kind = kind or params.pop('kind', None)
190 return HoloViewsConverter(
--> 191 self._data, x, y, kind=kind, **params
192 )
193
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/converter.py in __init__(self, data, x, y, kind, by, use_index, group_label, value_label, backlog, persist, use_dask, crs, fields, groupby, dynamic, width, height, shared_axes, grid, legend, rot, title, xlim, ylim, clim, xticks, yticks, logx, logy, loglog, hover, subplots, label, invert, stacked, colorbar, fontsize, colormap, datashade, rasterize, row, col, figsize, debug, xaxis, yaxis, framewise, aggregator, projection, global_extent, geo, precompute, flip_xaxis, flip_yaxis, dynspread, hover_cols, x_sampling, y_sampling, project, xlabel, ylabel, xformatter, yformatter, tools, padding, **kwds)
238 self.dynamic = dynamic
239 self.geo = geo or crs or global_extent or projection or project
--> 240 self.crs = self._process_crs(data, crs) if self.geo else None
241 self.project = project
242 self.row = row
/srv/conda/envs/notebook/lib/python3.7/site-packages/hvplot/converter.py in _process_crs(self, data, crs)
400 raise ValueError(
401 "'{}' must be either a valid crs or an reference to "
--> 402 "a `data.attr` containing a valid crs.".format(crs))
403
404
ValueError: '+init=epsg:26911' must be either a valid crs or an reference to a `data.attr` containing a valid crs.
```
| What happens if you just use: ``'epsg:26911'``
@philippjfr, yes, that works, but is that using the proj4 syntax?
Probably not, no. I would expect `'proj4: +init=epsg:26911'` to work but it doesn't seem to either.
I suspect it's this https://github.com/pytroll/pycoast/issues/27
>I suspect it's this pytroll/pycoast#27
Good find, that seems to be it.
I can open a PR
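For reference, a minimal usage sketch of the fixed helper, mirroring the test added above; the comment summarises the pyproj version check introduced in the patch (cartopy and geoviews need to be installed):
```python
import cartopy.crs as ccrs
from hvplot.util import proj_to_cartopy

# pyproj >= 2 removed Proj.is_latlong(); the fix first checks the Proj.crs
# attribute (CRS.is_geographic) and only falls back to is_latlong() on pyproj < 2.
crs = proj_to_cartopy('+init=epsg:26911')
assert isinstance(crs, ccrs.CRS)  # a UTM projection rather than an error
```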
| 2019-09-23T18:35:49 |
holoviz/hvplot | 316 | holoviz__hvplot-316 | [
"311"
] | 9032c47c34ad6319c7645e66a9a743e75133bcc4 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -7,6 +7,7 @@
import holoviews as hv
import pandas as pd
import numpy as np
+import colorcet as cc
from bokeh.models import HoverTool
from holoviews.core.dimension import Dimension
@@ -270,7 +271,7 @@ class HoloViewsConverter(object):
_default_cmaps = {
'linear': 'kbc_r',
- 'categorical': 'Category10',
+ 'categorical': cc.palette['glasbey_category10'],
'cyclic': 'colorwheel',
'diverging': 'coolwarm'
}
| diff --git a/hvplot/tests/testoptions.py b/hvplot/tests/testoptions.py
--- a/hvplot/tests/testoptions.py
+++ b/hvplot/tests/testoptions.py
@@ -206,9 +206,10 @@ def test_holoviews_defined_default_opts_logx_overwritten_in_call(self):
self.assertEqual(opts.kwargs.get('logz'), None)
def test_hvplot_default_cat_cmap_opts(self):
+ import colorcet as cc
plot = self.df.hvplot.scatter('x', 'y', c='category')
opts = Store.lookup_options('bokeh', plot, 'style')
- self.assertEqual(opts.kwargs['cmap'], 'Category10')
+ self.assertEqual(opts.kwargs['cmap'], cc.palette['glasbey_category10'])
def test_hvplot_default_num_cmap_opts(self):
plot = self.df.hvplot.scatter('x', 'y', c='number')
| default cmap for categorical should change depending on # of categories
The default cmap for categorical data is Category10, so when more than 10 categories are in a dataset, colors are recycled. It would be nice if the cmap would change to 'Category20', for example, when 15 categories are shown.
| I'd just change the default to glasbey_category10 (see https://colorcet.pyviz.org/user_guide/Categorical.html) to give 256 available colors without affecting the first 10.
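For illustration, what the suggested palette provides (a small sketch using colorcet directly):
```python
import colorcet as cc

# glasbey_category10 keeps Category10's look for the first ten colors and then
# extends to 256 visually distinct ones, so >10 categories no longer recycle hues.
palette = cc.palette['glasbey_category10']
print(len(palette))  # 256
```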
Should we do this in hvplot 0.5.0? We are setting a new default cmap for linear data, so might make sense. | 2019-09-24T18:25:35 |
holoviz/hvplot | 318 | holoviz__hvplot-318 | [
"317"
] | 9032c47c34ad6319c7645e66a9a743e75133bcc4 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -1106,8 +1106,10 @@ def single_chart(self, element, x, y, data=None):
chart = element(data, kdims, vdims).relabel(**self._relabel)
return chart.redim(**self._redim).opts(opts)
- def _process_chart_args(self, data, x, y, single_y=False):
- data = self.data if data is None else data
+ def _process_chart_x(self, data, x):
+ """This should happen before _process_chart_y"""
+ if x is False:
+ return None
x = x or self.x
if x is None:
@@ -1115,19 +1117,13 @@ def _process_chart_args(self, data, x, y, single_y=False):
x = self.indexes[0]
else:
x = [c for c in data.columns if c not in self.by+self.groupby][0]
- elif not x:
- raise ValueError('Could not determine what to plot. Expected '
- 'x to be declared or use_index to be enabled.')
- if self.sort_date and self.datatype == 'pandas':
- from pandas.api.types import is_datetime64_any_dtype as is_datetime
- if x in self.indexes:
- index = self.indexes.index(x)
- if is_datetime(data.axes[index]):
- data = data.sort_index(axis=self.indexes.index(x))
- elif x in data.columns:
- if is_datetime(data[x]):
- data = data.sort_values(x)
+ if not x:
+ raise ValueError('Could not determine what to plot. Set x explicitly')
+ return x
+
+ def _process_chart_y(self, data, x, y, single_y):
+ """This should happen after _process_chart_x"""
y = y or self.y
if y is None:
ys = [c for c in data.columns if c not in [x]+self.by+self.groupby]
@@ -1138,7 +1134,26 @@ def _process_chart_args(self, data, x, y, single_y=False):
if len(num_ys) >= 1:
ys = num_ys
y = ys[0] if len(ys) == 1 or single_y else ys
+ return y
+
+ def _process_chart_args(self, data, x, y, single_y=False):
+ data = self.data if data is None else data
+
+ x = self._process_chart_x(data, x)
+ y = self._process_chart_y(data, x, y, single_y)
+
+ # sort by date if enabled and x is a date
+ if x is not None and self.sort_date and self.datatype == 'pandas':
+ from pandas.api.types import is_datetime64_any_dtype as is_datetime
+ if x in self.indexes:
+ index = self.indexes.index(x)
+ if is_datetime(data.axes[index]):
+ data = data.sort_index(axis=self.indexes.index(x))
+ elif x in data.columns:
+ if is_datetime(data[x]):
+ data = data.sort_values(x)
+ # set index to column if needed in hover_cols
if self.use_index and any(c for c in self.hover_cols if
c in self.indexes and
c not in data.columns):
@@ -1260,7 +1275,7 @@ def _stats_plot(self, element, y, data=None):
"""
Helper method to generate element from indexed dataframe.
"""
- data, x, y = self._process_chart_args(data, None, y)
+ data, x, y = self._process_chart_args(data, False, y)
opts = {'plot': dict(self._plot_opts), 'norm': self._norm_opts,
'style': self._style_opts}
| Sorting non-categorical axis by
Not sure whether this is an hvplot issue or a holoviews issue, but there doesn't seem to be any sorting for non-cat `by`, at least for violin plots.
```python
import xarray as xr
import hvplot.xarray
ds = xr.tutorial.open_dataset('air_temperature')
ds.hvplot.violin('air', by='lat', c='lat', cmap='fire')
```
<img width="1011" alt="Screen Shot 2019-09-24 at 3 31 33 PM" src="https://user-images.githubusercontent.com/4806877/65544049-75afb200-dee0-11e9-93d9-3b5fde9fd779.png">
Compared to the [docs](https://hvplot.pyviz.org/user_guide/Gridded_Data.html):
<img width="1071" alt="Screen Shot 2019-09-24 at 3 33 36 PM" src="https://user-images.githubusercontent.com/4806877/65544178-b60f3000-dee0-11e9-835c-76441cabd365.png">
| It's this logic that's doing it:
```python
if self.sort_date and self.datatype == 'pandas':
from pandas.api.types import is_datetime64_any_dtype as is_datetime
if x in self.indexes:
index = self.indexes.index(x)
if is_datetime(data.axes[index]):
data = data.sort_index(axis=self.indexes.index(x))
elif x in data.columns:
if is_datetime(data[x]):
data = data.sort_values(x)
```
We shouldn't sort by columns not being used. | 2019-09-24T21:10:31 |
|
holoviz/hvplot | 323 | holoviz__hvplot-323 | [
"325"
] | ab43c4f68aa7e485326dea567a348b96d24ebf60 | diff --git a/hvplot/plotting/core.py b/hvplot/plotting/core.py
--- a/hvplot/plotting/core.py
+++ b/hvplot/plotting/core.py
@@ -1,8 +1,15 @@
from __future__ import absolute_import
+from collections import defaultdict
import param
+try:
+ import panel as pn
+ panel_available = True
+except:
+ panel_available = False
from ..converter import HoloViewsConverter
+from ..util import process_dynamic_args
class hvPlotBase(object):
@@ -31,8 +38,38 @@ def __call__(self, x=None, y=None, kind=None, **kwds):
-------
HoloViews object: Object representing the requested visualization
"""
- if kind is not None and kind not in self.__all__:
- raise NotImplementedError(f"kind='{kind}' for data of type {type(self._data)}")
+ if isinstance(kind, str) and kind not in self.__all__:
+ raise NotImplementedError("kind='{kind}' for data of type {type}".format(
+ kind=kind, type=type(self._data)))
+
+ if panel_available:
+ dynamic , arg_deps, arg_names = process_dynamic_args(x, y, kind, **kwds)
+ if dynamic or arg_deps:
+ if kwds.get('groupby', None):
+ raise ValueError('Groupby is not yet supported when using explicit widgets')
+ @pn.depends(*arg_deps, **dynamic)
+ def callback(*args, **dyn_kwds):
+ xd = dyn_kwds.pop('x', x)
+ yd = dyn_kwds.pop('y', y)
+ kindd = dyn_kwds.pop('kind', kind)
+
+ combined_kwds = dict(kwds, **dyn_kwds)
+ fn_args = defaultdict(list)
+ for name, arg in zip(arg_names, args):
+ fn_args[(name, kwds[name])].append(arg)
+ for (name, fn), args in fn_args.items():
+ combined_kwds[name] = fn(*args)
+ return self._get_converter(xd, yd, kindd, **combined_kwds)(kindd, xd, yd)
+
+ return pn.panel(callback)
+ elif 'widgets' in kwds:
+ widgets = kwds.pop('widgets')
+ for w in widgets.values():
+ if not issubclass(w, pn.widgets.Widget):
+ raise ValueError('Expected widgets to be dict of form dim: '
+ 'pn.widgets.Widget Got type {}'.format(w))
+ plot = self._get_converter(x, y, kind, **kwds)(kind, x, y)
+ return pn.panel(plot, widgets=widgets)
return self._get_converter(x, y, kind, **kwds)(kind, x, y)
diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -4,9 +4,16 @@
from __future__ import absolute_import
from distutils.version import LooseVersion
+from types import FunctionType
import pandas as pd
import holoviews as hv
+import param
+try:
+ import panel as pn
+ panel_available = True
+except:
+ panel_available = False
from holoviews.core.util import basestring
@@ -384,3 +391,26 @@ def process_derived_datetime_pandas(data, not_found, indexes=None):
not_found = [var for var in not_found if var not in extra_cols.keys()]
return not_found, data
+
+
+def process_dynamic_args(x, y, kind, **kwds):
+ dynamic = {}
+ arg_deps = []
+ arg_names = []
+
+ for k, v in list(kwds.items()) + [('x', x), ('y', y), ('kind', kind)]:
+ if isinstance(v, param.Parameter):
+ dynamic[k] = v
+ elif panel_available and isinstance(v, pn.widgets.Widget):
+ if LooseVersion(pn.__version__) < '0.6.4':
+ dynamic[k] = v.param.value
+ else:
+ dynamic[k] = v
+
+ for k, v in kwds.items():
+ if k not in dynamic and isinstance(v, FunctionType) and hasattr(v, '_dinfo'):
+ deps = v._dinfo['dependencies']
+ arg_deps += list(deps)
+ arg_names += list(k) * len(deps)
+
+ return dynamic, arg_deps, arg_names
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -130,6 +130,7 @@ def package_assets(example_path):
_examples_extra = [
'geoviews >=1.6.0',
+ 'panel',
'geopandas',
'xarray',
'networkx',
| diff --git a/hvplot/tests/testpanel.py b/hvplot/tests/testpanel.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/testpanel.py
@@ -0,0 +1,69 @@
+"""
+Tests for panel widgets and param objects as arguments
+"""
+from unittest import TestCase, SkipTest
+
+from hvplot.util import process_xarray # noqa
+
+def look_for_class(panel, classname, items=None):
+ """
+ Descend a panel object and find any instances of the given class
+ """
+ import panel as pn
+
+ if items is None:
+ items = []
+ if isinstance(panel, pn.layout.ListPanel):
+ for p in panel:
+ items = look_for_class(p, classname, items)
+ elif isinstance(panel, classname):
+ items.append(panel)
+ return items
+
+
+class TestPanelObjects(TestCase):
+
+ def setUp(self):
+ try:
+ import panel as pn # noqa
+ import hvplot.pandas # noqa
+ except:
+ raise SkipTest('panel not available')
+
+ from bokeh.sampledata.iris import flowers
+ self.flowers = flowers
+ self.cols = list(self.flowers.columns[:-1])
+
+
+ def test_using_explicit_widgets_works(self):
+ import panel as pn
+
+ x = pn.widgets.Select(name='x', value='sepal_length', options=self.cols)
+ y = pn.widgets.Select(name='y', value='sepal_width', options=self.cols)
+ kind = pn.widgets.Select(name='kind', value='scatter', options=['bivariate', 'scatter'])
+ by_species = pn.widgets.Checkbox(name='By species')
+ color = pn.widgets.ColorPicker(value='#ff0000')
+
+ @pn.depends(by_species.param.value, color.param.value)
+ def by_species_fn(by_species, color):
+ return 'species' if by_species else color
+
+ self.flowers.hvplot(x, y=y, kind=kind.param.value, c=color)
+
+ def test_casting_widgets_to_different_classes(self):
+ import panel as pn
+
+ pane = self.flowers.hvplot.scatter(
+ groupby='species', legend='top_right',
+ widgets={'species': pn.widgets.DiscreteSlider})
+
+ assert len(look_for_class(pane, pn.widgets.DiscreteSlider)) == 1
+
+ def test_using_explicit_widgets_with_groupby_raises_error(self):
+ import panel as pn
+
+ x = pn.widgets.Select(name='x', value='sepal_length', options=self.cols)
+ y = pn.widgets.Select(name='y', value='sepal_width', options=self.cols)
+
+ with self.assertRaisesRegex(ValueError, "Groupby is not yet"):
+ self.flowers.hvplot(x, y, groupby='species')
diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -207,3 +207,58 @@ def test_proj_to_cartopy(self):
crs = proj_to_cartopy('+init=epsg:26911')
assert isinstance(crs, self.ccrs.CRS)
+
+
+class TestDynamicArgs(TestCase):
+
+ def setUp(self):
+ try:
+ import panel as pn # noqa
+ except:
+ raise SkipTest('panel not available')
+
+ def test_dynamic_and_static(self):
+ import panel as pn
+ from ..util import process_dynamic_args
+
+ x = 'sepal_width'
+ y = pn.widgets.Select(name='y', value='sepal_length', options=['sepal_length', 'petal_length'])
+ kind = pn.widgets.Select(name='kind', value='scatter', options=['bivariate', 'scatter'])
+
+ dynamic, arg_deps, arg_names = process_dynamic_args(x, y, kind)
+ assert 'x' not in dynamic
+ assert 'y' in dynamic
+ assert arg_deps == []
+
+ def test_dynamic_kwds(self):
+ import panel as pn
+ from ..util import process_dynamic_args
+
+ x = 'sepal_length'
+ y = 'sepal_width'
+ kind = 'scatter'
+ color = pn.widgets.ColorPicker(value='#ff0000')
+
+ dynamic, arg_deps, arg_names = process_dynamic_args(x, y, kind, c=color)
+ assert 'x' not in dynamic
+ assert 'c' in dynamic
+ assert arg_deps == []
+
+ def test_fn_kwds(self):
+ import panel as pn
+ from ..util import process_dynamic_args
+
+ x = 'sepal_length'
+ y = 'sepal_width'
+ kind = 'scatter'
+ by_species = pn.widgets.Checkbox(name='By species')
+ color = pn.widgets.ColorPicker(value='#ff0000')
+
+ @pn.depends(by_species.param.value, color.param.value)
+ def by_species_fn(by_species, color):
+ return 'species' if by_species else color
+
+ dynamic, arg_deps, arg_names = process_dynamic_args(x, y, kind, c=by_species_fn)
+ assert dynamic == {}
+ assert arg_names == ['c', 'c']
+ assert len(arg_deps) == 2
| Users should be able to specify which widgets they want for groupby
I think the syntax will be `widgets={'dim': pn.widgets.DiscreteSlider}`
| 2019-10-02T17:51:48 |
|
holoviz/hvplot | 341 | holoviz__hvplot-341 | [
"337"
] | 02d5c149931249b3ef8efab23bec1a14d75e7f98 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -968,7 +968,7 @@ def __call__(self, kind, x, y):
else:
opts['cmap'] = process_cmap(cmap, levels)
- if self.by:
+ if self.by and not self.subplots:
opts['aggregator'] = reductions.count_cat(self.by[0])
if self.aggregator:
agg = self.aggregator
| Using by on datashade/rasterize plot
As described in this [SO post](https://stackoverflow.com/questions/58378802/aggregation-column-category-not-found-when-i-set-datashader-true-with-hvplot), the datashade/rasterize operations try to apply to the `by` column even though that column is being grouped over and therefore no longer exists.
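A hypothetical reproducer along the lines of the linked report — the frame and column names below are illustrative assumptions, not taken from the post:
```python
import numpy as np
import pandas as pd
import hvplot.pandas  # noqa

df = pd.DataFrame({
    'x': np.random.randn(10000),
    'y': np.random.randn(10000),
    'category': np.random.choice(list('ABC'), 10000),
})

# Before this change, combining by= with subplots=True and datashade/rasterize
# failed: each subplot no longer contains the 'category' column, yet the
# count_cat aggregator was still pointed at it.
df.hvplot.points('x', 'y', by='category', subplots=True, datashade=True)
```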
| 2019-10-17T16:42:46 |
||
holoviz/hvplot | 343 | holoviz__hvplot-343 | [
"336"
] | 775fc0d25ac20b299c5350400f3ad01ecfd23393 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -30,7 +30,7 @@
from pandas import DatetimeIndex, MultiIndex
from .util import (
- is_tabular, is_series, is_dask, is_intake, is_streamz, is_xarray,
+ is_tabular, is_series, is_dask, is_intake, is_streamz, is_xarray, is_xarray_dataarray,
process_crs, process_intake, process_xarray, check_library, is_geopandas,
process_derived_datetime_xarray, process_derived_datetime_pandas
)
@@ -498,7 +498,10 @@ def _process_symmetric(self, symmetric, clim, check_symmetric_max):
if self.data.chunks:
return False
data = self.data[self.z]
- if data.size > check_symmetric_max:
+ if is_xarray_dataarray(data):
+ if data.size > check_symmetric_max:
+ return False
+ else:
return False
elif self._color_dim:
@@ -920,12 +923,13 @@ def __call__(self, kind, x, y):
else:
obj = dataset.map(lambda ds: method(x, y, data=ds.data), Dataset)
elif len(zs) > 1:
+ dimensions = [Dimension(self.group_label, values=zs)]
if self.dynamic:
- dataset = DynamicMap(lambda z: method(x, y, z, data=dataset.data),
- kdims=[Dimension(self.group_label, values=zs)])
+ obj = DynamicMap(lambda z: method(x, y, z, data=dataset.data),
+ kdims=dimensions)
else:
- dataset = HoloMap({z: method(x, y, z, data=dataset.data) for z in zs},
- kdims=[self.group_label])
+ obj = HoloMap({z: method(x, y, z, data=dataset.data) for z in zs},
+ kdims=dimensions)
else:
obj = method(x, y, data=dataset.data)
if self.grid:
diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -253,6 +253,12 @@ def is_xarray(data):
from xarray import DataArray, Dataset
return isinstance(data, (DataArray, Dataset))
+def is_xarray_dataarray(data):
+ if not check_library(data, 'xarray'):
+ return False
+ from xarray import DataArray
+ return isinstance(data, DataArray)
+
def process_intake(data, use_dask):
if data.container not in ('dataframe', 'xarray'):
| diff --git a/hvplot/tests/testgridplots.py b/hvplot/tests/testgridplots.py
--- a/hvplot/tests/testgridplots.py
+++ b/hvplot/tests/testgridplots.py
@@ -38,6 +38,11 @@ def setUp(self):
self.da_img = xr.DataArray(np.arange(-2, 2).reshape((2, 2)), name='foo')
self.big_img = xr.DataArray(np.arange(-1e6, 1e6).reshape(1000, 2000))
+ self.ds = xr.Dataset({
+ 'temp': (('lon', 'lat'), 15 + 8 * np.random.randn(2, 2)),
+ 'precip': (('lon', 'lat'), 10 * np.random.rand(2, 2))},
+ coords={'lon': [-99.83, -99.32],'lat': [42.25, 42.21]})
+
def test_rgb_dataarray_no_args(self):
rgb = self.da_rgb.hvplot()
self.assertEqual(rgb, RGB(([0, 1], [0, 1])+tuple(self.da_rgb.values)))
@@ -155,3 +160,10 @@ def test_symmetric_with_big_img_and_check_symmetric_max_calculates_symmetric(sel
self.assertEqual(plot_opts.kwargs.get('symmetric'), True)
style_opts = Store.lookup_options('bokeh', plot, 'style')
self.assertEqual(style_opts.kwargs['cmap'], 'coolwarm')
+
+ def test_multiple_zs(self):
+ plot = self.ds.hvplot(x='lat', y='lon', z=['temp', 'precip'], dynamic=False)
+ assert 'temp' in plot.keys()
+ assert 'precip' in plot.keys()
+ assert plot['temp'].kdims == ['lat', 'lon']
+ assert plot['precip'].kdims == ['lat', 'lon']
| Missing behavior for zs
As reported in https://stackoverflow.com/questions/58350586/generate-xarray-dataset-image-subplots, passing a list of `zs` appears not to be fully implemented in current hvplot master:
```
import xarray as xr, numpy as np, hvplot.xarray
ds = xr.Dataset({
'temp': (('lon', 'lat'), 15 + 8 * np.random.randn(2, 2)),
'precip': (('lon', 'lat'), 10 * np.random.rand(2, 2))},
coords={'lon': [-99.83, -99.32],'lat': [42.25, 42.21]})
ds.hvplot(x="lat", y="lon", z=['temp', 'precip'])
```
```
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-18-ca3236b8574f> in <module>
4 'precip': (('lon', 'lat'), 10 * np.random.rand(2, 2))},
5 coords={'lon': [-99.83, -99.32],'lat': [42.25, 42.21]})
----> 6 ds.hvplot(x="lat", y="lon", z=['temp', 'precip'])
~/hvplot/hvplot/plotting/core.py in __call__(self, x, y, kind, **kwds)
72 return pn.panel(plot, **panel_dict)
73
---> 74 return self._get_converter(x, y, kind, **kwds)(kind, x, y)
75
76 def _get_converter(self, x=None, y=None, kind=None, **kwds):
~/hvplot/hvplot/converter.py in __call__(self, kind, x, y)
938
939 if not (self.datashade or self.rasterize):
--> 940 return self._apply_layers(obj)
941
942 try:
UnboundLocalError: local variable 'obj' referenced before assignment
```
Plotting works fine if a single item (precip or temp here) is passed for `z`, but a multi-item list of zs fails. It _appears_ to have been intended to be implemented, given the handling for `len(zs) > 1` in
[`hvplot.converter.HoloViewsConverter.__call__()`](https://github.com/pyviz/hvplot/blob/master/hvplot/converter.py#L913), but that code defines `dataset` without defining `obj`, causing a problem when all the various branches eventually lead to something processing `obj`. A single-item list of zs also fails, for a different reason, but that might not have been expected to be implemented. The original poster was hoping to use multiple `zs` as subplots, which seems reasonable given how multiple `ys` work, though without subplots it's not clear how the multiple zs should be handled. (With `ys`, the default is to overlay them, but overlaying multiple colormaps isn't particularly meaningful, so a default of getting a widget seems more useful in that case.) In any case, this case appears to be _partially_ implemented, and it seems like it should either be fully implemented or explicitly made into an error.
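For reference, the behaviour exercised by the test added in this PR, sketched as a usage example:
```python
import numpy as np
import xarray as xr
import hvplot.xarray  # noqa

ds = xr.Dataset({
    'temp': (('lon', 'lat'), 15 + 8 * np.random.randn(2, 2)),
    'precip': (('lon', 'lat'), 10 * np.random.rand(2, 2))},
    coords={'lon': [-99.83, -99.32], 'lat': [42.25, 42.21]})

# With the fix, a multi-item z yields a HoloMap/DynamicMap keyed on the
# variable name, i.e. a widget to switch between 'temp' and 'precip'.
plot = ds.hvplot(x='lat', y='lon', z=['temp', 'precip'], dynamic=False)
list(plot.keys())  # ['temp', 'precip']
```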
| 2019-10-17T17:04:01 |
|
holoviz/hvplot | 344 | holoviz__hvplot-344 | [
"342"
] | 0f2f67ad2e211dcd3d741bf47ad5e3f6e02db142 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -312,7 +312,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
self.value_label = value_label
self.group_label = group_label
self.dynamic = dynamic
- self.geo = geo or crs or global_extent or projection or project
+ self.geo = any([geo, crs, global_extent, projection, project, coastline])
self.crs = self._process_crs(data, crs) if self.geo else None
self.project = project
self.coastline = coastline
| diff --git a/hvplot/tests/testgeo.py b/hvplot/tests/testgeo.py
--- a/hvplot/tests/testgeo.py
+++ b/hvplot/tests/testgeo.py
@@ -73,6 +73,13 @@ def test_plot_with_coastline(self):
coastline = plot.get(1)
self.assertIsInstance(coastline, gv.Feature)
+ def test_plot_with_coastline_sets_geo_by_default(self):
+ import geoviews as gv
+ plot = self.df.hvplot.points('x', 'y', coastline=True)
+ self.assertEqual(len(plot), 2)
+ coastline = plot.get(1)
+ self.assertIsInstance(coastline, gv.Feature)
+
def test_plot_with_coastline_scale(self):
plot = self.df.hvplot.points('x', 'y', geo=True, coastline='10m')
opts = plot.get(1).opts.get('plot')
| Should coastline=True automatically trigger geo=True if crs is not specified?
```
import xarray as xr
ds = xr.tutorial.open_dataset('air_temperature')
ds.hvplot('lon', 'lat', coastline=True)
```
When I set coastline=True, I expect to see a coastline, but this isn't the case.
| Right. I think `coastline` should. But then they are different from `tiles` which don't require geoviews, so shouldn't set `geo=True` (probably?).
I agree that tiles probably shouldn't set geo=True since it's a holoviews thing, but coastline I think is a geoviews exclusive so coastline -> geo=True | 2019-10-17T17:35:47 |
holoviz/hvplot | 446 | holoviz__hvplot-446 | [
"444"
] | 36c1f8c9769b001d3991df1d80581134e0ce8f20 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -235,7 +235,8 @@ class HoloViewsConverter(object):
'min_height', 'min_width', 'frame_height', 'frame_width',
'aspect', 'data_aspect', 'fontscale']
- _style_options = ['color', 'alpha', 'colormap', 'fontsize', 'c', 'cmap']
+ _style_options = ['color', 'alpha', 'colormap', 'fontsize', 'c', 'cmap',
+ 'color_key']
_op_options = ['datashade', 'rasterize', 'x_sampling', 'y_sampling',
'aggregator']
@@ -794,10 +795,12 @@ def _process_style(self, kwds, plot_opts):
style_opts.update(**{k: v for k, v in kwds.items() if k in valid_opts})
# Color
- if 'cmap' in kwds and 'colormap' in kwds:
- raise TypeError("Only specify one of `cmap` and `colormap`.")
+ cmap_kwds = {'cmap', 'colormap', 'color_key'}.intersection(kwds)
+ if len(cmap_kwds) > 1:
+ raise TypeError('Specify at most one of `cmap`, `colormap`, or '
+ '`color_key`.')
- cmap = kwds.pop('cmap', kwds.pop('colormap', None))
+ cmap = kwds[cmap_kwds.pop()] if cmap_kwds else None
color = kwds.pop('color', kwds.pop('c', None))
if color is not None:
| diff --git a/hvplot/tests/testoperations.py b/hvplot/tests/testoperations.py
--- a/hvplot/tests/testoperations.py
+++ b/hvplot/tests/testoperations.py
@@ -100,6 +100,9 @@ def test_aspect_and_frame_height_with_datashade_and_dynamic_is_false(self, opt):
def test_cmap_can_be_color_key(self):
color_key = {'A': '#ff0000', 'B': '#00ff00', 'C': '#0000ff'}
self.df.hvplot.points(x='x', y='y', by='category', cmap=color_key, datashade=True)
+ with self.assertRaises(TypeError):
+ self.df.hvplot.points(x='x', y='y', by='category', datashade=True,
+ cmap='kbc_r', color_key=color_key)
def test_when_datashade_is_true_set_hover_to_false_by_default(self):
plot = self.df.hvplot(x='x', y='y', datashade=True)
| allow color_key an alias for cmap
The ability to pass cmap dicts through to datashader color_key was solved in the fix for #267, but it may be good to also accept `color_key` as an argument. See https://discourse.holoviz.org/t/keyed-color-map-for-image/505.
BTW, @jsignell mentioned in the fix the need to think about the legend ... which would be great to have too.
| That seems like a reasonable suggestion. Would you be willing to open a PR?
Maybe. Can you can point me to an example of the pythonic way to alias a kwarg, one that changes the documentation correctly too?
Cool! You'll want to mimic what's going on with `colormap` in this file: https://github.com/holoviz/hvplot/blob/master/hvplot/converter.py | 2020-04-19T03:30:11 |
holoviz/hvplot | 494 | holoviz__hvplot-494 | [
"478"
] | 35481575ed16e40e82f5768e0bf096163b65ffc9 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -217,9 +217,7 @@ def package_assets(example_path):
extras_require=extras_require,
tests_require=extras_require['tests'],
entry_points={
- 'console_scripts': [
- 'hvplot = hvplot.__main__:main'
- ],
+ 'console_scripts': [],
'pandas_plotting_backends': [
'holoviews = hvplot:plotting',
],
| Entrypoint broken
The setup.py specifies `hvplot.__main__` as a console_script but that doesn't actually exist.
| 2020-07-27T15:55:39 |
||
holoviz/hvplot | 495 | holoviz__hvplot-495 | [
"114"
] | 17ce0cc18a0393ae82d3a9f6f11b3bc4cad29b30 | diff --git a/hvplot/plotting/scatter_matrix.py b/hvplot/plotting/scatter_matrix.py
--- a/hvplot/plotting/scatter_matrix.py
+++ b/hvplot/plotting/scatter_matrix.py
@@ -1,4 +1,6 @@
from __future__ import absolute_import
+from functools import partial
+import warnings
import holoviews as _hv
@@ -11,6 +13,7 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
alpha=0.5, nonselection_alpha=0.1,
tools=None, cmap=None, colormap=None,
diagonal_kwds=None, hist_kwds=None, density_kwds=None,
+ datashade=False, rasterize=False, dynspread=False, spread=False,
**kwds):
"""
Scatter matrix of numeric columns.
@@ -43,7 +46,25 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
Default is `Category10 <https://github.com/d3/d3-3.x-api-reference/blob/master/Ordinal-Scales.md#category10>`.
diagonal_kwds/hist_kwds/density_kwds: dict, optional
Keyword options for the diagonal plots
- kwds: Keyword options for the off-diagonal plots, optional
+ datashade (default=False):
+ Whether to apply rasterization and shading (colormapping) using
+ the Datashader library, returning an RGB object instead of
+ individual points
+ rasterize (default=False):
+ Whether to apply rasterization using the Datashader library,
+ returning an aggregated Image (to be colormapped by the
+ plotting backend) instead of individual points
+ dynspread (default=False):
+ For plots generated with datashade=True or rasterize=True,
+ automatically increase the point size when the data is sparse
+ so that individual points become more visible.
+ kwds supported include ``max_px``, ``threshold``, ``shape``, ``how`` and ``mask``.
+ spread (default=False):
+ Make plots generated with datashade=True or rasterize=True
+ increase the point size to make points more visible, by
+ applying a fixed spreading of a certain number of cells/pixels. kwds
+ supported include: ``px``, ``shape``, ``how`` and ``mask``.
+ kwds: Keyword options for the off-diagonal plots and datashader's spreading , optional
Returns:
--------
@@ -54,6 +75,7 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
--------
:func:`pandas.plotting.scatter_matrix` : Equivalent pandas function.
"""
+
data = _hv.Dataset(data)
supported = list(HoloViewsConverter._kind_mapping)
if diagonal not in supported:
@@ -65,6 +87,67 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
diagonal = HoloViewsConverter._kind_mapping[diagonal]
chart = HoloViewsConverter._kind_mapping[chart]
+ if rasterize or datashade:
+ try:
+ import datashader # noqa
+ except ImportError:
+ raise ImportError("rasterize and datashade require "
+ "datashader to be installed.")
+ from ..util import hv_version
+ if hv_version <= '1.14.6':
+ warnings.warn(
+ "Versions of holoviews before 1.14.7 did not suppport "
+ "dynamic update of rasterized/datashaded scatter matrix. "
+ "Update holoviews to a newer version."
+ )
+
+ if rasterize and datashade:
+ raise ValueError("Choose to either rasterize or "
+ "datashade the scatter matrix, not both.")
+
+ if not rasterize and not datashade and (spread or dynspread):
+ raise ValueError("dynspread or spread need rasterize "
+ "or datashade to be set to True.")
+
+ if rasterize:
+ import holoviews.operation.datashader as hd
+ if dynspread or spread:
+ if hd.ds_version < '0.12.0':
+ raise RuntimeError(
+ 'Any version of datashader less than 0.12.0 does '
+ 'not support rasterize with dynspread or spread.')
+
+ #remove datashade kwds
+ if datashade or rasterize:
+ import holoviews.operation.datashader as hd
+
+ ds_kwds = {}
+ if 'aggregator' in kwds:
+ ds_kwds['aggregator'] = kwds.pop('aggregator')
+
+ #remove dynspread kwds
+ sp_kwds = {}
+ if dynspread:
+ if 'max_px' in kwds:
+ sp_kwds['max_px'] = kwds.pop('max_px')
+ if 'threshold' in kwds:
+ sp_kwds['threshold'] = kwds.pop('threshold')
+ if 'shape' in kwds:
+ sp_kwds['shape'] = kwds.pop('shape')
+ if 'how' in kwds:
+ sp_kwds['how'] = kwds.pop('how')
+ if 'mask' in kwds:
+ sp_kwds['mask'] = kwds.pop('mask')
+ if spread:
+ if 'px' in kwds:
+ sp_kwds['px'] = kwds.pop('px')
+ if 'shape' in kwds:
+ sp_kwds['shape'] = kwds.pop('shape')
+ if 'how' in kwds:
+ sp_kwds['how'] = kwds.pop('how')
+ if 'mask' in kwds:
+ sp_kwds['mask'] = kwds.pop('mask')
+
if cmap and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
colors = cmap or colormap or _hv.plotting.util.process_cmap('Category10', categorical=True)
@@ -72,13 +155,20 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
chart_opts = dict(alpha=alpha, cmap=colors, tools=tools,
nonselection_alpha=nonselection_alpha, **kwds)
+ #get initial scatter matrix. No color.
grid = _hv.operation.gridmatrix(data, diagonal_type=diagonal, chart_type=chart)
+
if c:
- chart_opts['color_index'] = c
+ #change colors for scatter matrix
+ chart_opts['color'] = c
+ # Add color vdim to each plot.
grid = grid.map(lambda x: x.clone(vdims=x.vdims+[c]), 'Scatter')
+ # create a new scatter matrix with groups for each catetory, so now the histogram will
+ # show separate colors for each group.
groups = _hv.operation.gridmatrix(data.groupby(c).overlay(),
chart_type=chart,
diagonal_type=diagonal)
+ # take the correct layer from each Overlay object within the scatter matrix.
grid = (grid * groups).map(lambda x: x.get(0) if isinstance(x.get(0), chart) else x.get(1),
_hv.Overlay)
@@ -89,5 +179,18 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
'`density_kwds`.')
diagonal_kwds = diagonal_kwds or hist_kwds or density_kwds or {}
+ # set the histogram colors
diagonal_opts = dict(fill_color=_hv.Cycle(values=colors), **diagonal_kwds)
- return grid.options({chart.__name__: chart_opts, diagonal.__name__: diagonal_opts})
+ # actually changing to the same color scheme for both scatter and histogram plots.
+ grid = grid.options({chart.__name__: chart_opts, diagonal.__name__: diagonal_opts})
+
+ # Perform datashade options after all the coloring is finished.
+ if datashade or rasterize:
+ aggregatefn = hd.datashade if datashade else hd.rasterize
+ grid = grid.map(partial(aggregatefn, **ds_kwds), specs=chart)
+ if spread or dynspread:
+ spreadfn = hd.dynspread if dynspread else (hd.spread if spread else lambda z, **_: z)
+ eltype = _hv.RGB if datashade else _hv.Image
+ grid = grid.map(partial(spreadfn, **sp_kwds), specs=eltype)
+
+ return grid
diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -10,7 +10,7 @@
kinds = set(hvConverter._kind_mapping) - set(hvConverter._gridded_types)
COLORMAPS = [cm for cm in list_cmaps() if not cm.endswith('_r_r')]
-MAX_ROWS = 10_000
+MAX_ROWS = 10000
class Controls(Viewer):
| diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -19,7 +19,7 @@ jobs:
exclude:
- os: windows-latest
python-version: 2.7
- timeout-minutes: 30
+ timeout-minutes: 60
defaults:
run:
shell: bash -l {0}
@@ -57,6 +57,12 @@ jobs:
conda activate test-environment
conda list
doit develop_install -c pyviz/label/dev -o tests
+ - name: Handling the decorator package on ubuntu and python 2.7
+ if: matrix.os == 'ubuntu-latest' && startsWith(matrix.python-version, 2.)
+ run: |
+ eval "$(conda shell.bash hook)"
+ conda activate test-environment
+ conda install -c pyviz/label/dev 'decorator=4.4.2'
- name: doit develop_install py3
if: startsWith(matrix.python-version, 3.)
run: |
diff --git a/hvplot/tests/plotting/__init__.py b/hvplot/tests/plotting/__init__.py
new file mode 100644
diff --git a/hvplot/tests/plotting/testscattermatrix.py b/hvplot/tests/plotting/testscattermatrix.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/plotting/testscattermatrix.py
@@ -0,0 +1,139 @@
+from unittest import TestCase, SkipTest
+import sys
+
+from parameterized import parameterized
+import numpy as np
+import pandas as pd
+
+from holoviews.core import GridMatrix, NdOverlay
+from holoviews.element import (
+ Bivariate,
+ Distribution,
+ HexTiles,
+ Histogram,
+ Scatter,
+)
+from hvplot import scatter_matrix
+
+class TestScatterMatrix(TestCase):
+
+ def setUp(self):
+ self.df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd'])
+
+ def test_returns_gridmatrix(self):
+ sm = scatter_matrix(self.df)
+ self.assertIsInstance(sm, GridMatrix)
+
+ def test_wrong_diagonal(self):
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, diagonal='wrong')
+
+ def test_wrong_chart(self):
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, chart='wrong')
+
+ def test_diagonal_default(self):
+ sm = scatter_matrix(self.df)
+ self.assertIsInstance(sm['a', 'a'], Histogram)
+
+ def test_offdiagonal_default(self):
+ sm = scatter_matrix(self.df)
+ self.assertIsInstance(sm['a', 'b'], Scatter)
+
+ def test_diagonal_kde(self):
+ sm = scatter_matrix(self.df, diagonal='kde')
+ self.assertIsInstance(sm['a', 'a'], Distribution)
+
+ def test_offdiagonal_bivariate(self):
+ sm = scatter_matrix(self.df, chart='bivariate')
+ self.assertIsInstance(sm['a', 'b'], Bivariate)
+
+ def test_offdiagonal_hexbin(self):
+ sm = scatter_matrix(self.df, chart='hexbin')
+ self.assertIsInstance(sm['a', 'b'], HexTiles)
+
+ def test_diagonal_kwargs_mutually_exclusive(self):
+ with self.assertRaises(TypeError):
+ scatter_matrix(self.df, diagonal_kwds=dict(a=1), hist_kwds=dict(a=1))
+ with self.assertRaises(TypeError):
+ scatter_matrix(self.df, diagonal_kwds=dict(a=1), density_kwds=dict(a=1))
+ with self.assertRaises(TypeError):
+ scatter_matrix(self.df, density_kwds=dict(a=1), hist_kwds=dict(a=1))
+
+ def test_diagonal_kwargs(self):
+ sm = scatter_matrix(self.df, diagonal_kwds=dict(line_color='red'))
+ self.assertEqual(sm['a', 'a'].opts.get().kwargs['line_color'], 'red')
+
+ def test_c(self):
+ df = self.df.copy(deep=True)
+ df['e'] = np.random.choice(list('xyz'), size=len(df))
+ sm = scatter_matrix(df, c='e')
+
+ self.assertIsInstance(sm['a', 'a'], NdOverlay)
+ diag_kdims = sm['a', 'a'].kdims
+ self.assertEqual(len(diag_kdims), 1)
+ self.assertEqual(diag_kdims[0].name, 'e')
+
+ self.assertIsInstance(sm['a', 'b'], Scatter)
+ offdiag_vdims = sm['a', 'b'].vdims
+ self.assertTrue('e' in (d.name for d in offdiag_vdims))
+
+
+class TestDatashader(TestCase):
+
+ def setUp(self):
+ try:
+ import datashader # noqa
+ except:
+ raise SkipTest('Datashader not available')
+ if sys.maxsize < 2**32:
+ raise SkipTest('Datashader does not support 32-bit systems')
+ self.df = pd.DataFrame(np.random.randn(1000, 3), columns=['a', 'b', 'c'])
+
+ def test_rasterize_datashade_mutually_exclusive(self):
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, rasterize=True, datashade=True)
+
+ def test_spread_but_no_rasterize_or_datashade(self):
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, dynspread=True)
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, spread=True)
+ with self.assertRaises(ValueError):
+ scatter_matrix(self.df, dynspread=True, spread=True)
+
+ @parameterized.expand([('rasterize',), ('datashade',)])
+ def test_rasterization(self, operation):
+ sm = scatter_matrix(self.df, **{operation: True})
+ dm = sm['a', 'b']
+ self.assertEqual(dm.callback.operation.name, operation)
+ dm[()]
+ self.assertEqual(len(dm.last.pipeline.operations), 3)
+
+ @parameterized.expand([('rasterize',), ('datashade',)])
+ def test_datashade_aggregator(self, operation):
+ sm = scatter_matrix(self.df, aggregator='mean', **{operation: True})
+ dm = sm['a', 'b']
+ dm[()]
+ self.assertEqual(dm.last.pipeline.operations[-1].aggregator, 'mean')
+
+ @parameterized.expand([('spread',), ('dynspread',)])
+ def test_spread_rasterize(self, operation):
+ sm = scatter_matrix(self.df, rasterize=True, **{operation: True})
+ dm = sm['a', 'b']
+ dm[()]
+ self.assertEqual(len(dm.last.pipeline.operations), 4)
+
+ @parameterized.expand([('spread',), ('dynspread',)])
+ def test_spread_datashade(self, operation):
+ sm = scatter_matrix(self.df, datashade=True, **{operation: True})
+ dm = sm['a', 'b']
+ dm[()]
+ self.assertEqual(len(dm.last.pipeline.operations), 4)
+
+ @parameterized.expand([('spread',), ('dynspread',)])
+ def test_spread_kwargs(self, operation):
+ sm = scatter_matrix(self.df, datashade=True, **{operation: True, 'shape': 'circle'})
+ dm = sm['a', 'b']
+ dm[()]
+ self.assertEqual(dm.last.pipeline.operations[-1].args[0].keywords['shape'], 'circle')
| datashade kwarg for scatter_matrix
It would be nifty to have `datashade=True` work for the `scatter_matrix` function. I'm happy to contribute this if someone can point me in the right direction.
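For reference, the keyword-level API this PR ends up adding can be sketched like this (a usage example based on the docstring and tests above):
```python
import numpy as np
import pandas as pd
import hvplot
import hvplot.pandas  # noqa

df = pd.DataFrame(np.random.randn(10000, 4), columns=list('abcd'))

# Off-diagonal panels are rasterized/shaded by datashader; dynspread keywords
# such as threshold or max_px are forwarded to the spreading operation.
hvplot.scatter_matrix(df, datashade=True, dynspread=True, threshold=0.5)
```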
| In theory you should be able to append ``.map(datashade, hv.Scatter)`` to the gridmatrix call, but it would indeed be nice to integrate this.
The latest implementation might just look like this:
```python
grid = grid.apply(datashade, per_element=True)
``` | 2020-07-27T18:01:34 |
holoviz/hvplot | 504 | holoviz__hvplot-504 | [
"503"
] | 9213abfcf14bd0b39b6f0f7d0b9b598c6b663ea7 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -758,6 +758,8 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
self.hover_cols = self.variables
elif hover_cols == 'all' and not self.use_index:
self.hover_cols = [v for v in self.variables if v not in self.indexes]
+ elif hover_cols !='all' and isinstance(hover_cols,str):
+ self.hover_cols = [hover_cols]
if self.datatype in ('geopandas', 'spatialpandas'):
self.hover_cols = [c for c in self.hover_cols if c!= 'geometry']
| Using a string (instead of list) raises misleading AttributeError on hover_cols in hvplot
Issue related to this discourse [topic](https://discourse.holoviz.org/t/using-a-string-instead-of-list-raises-misleading-attributeerror-on-hover-cols-in-hvplot/1137)
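A usage sketch of the fixed behaviour (the sample dataset and columns are only for illustration):
```python
import hvplot.pandas  # noqa
from bokeh.sampledata.autompg import autompg

# A plain string is now wrapped into a single-element list internally,
# so both spellings add the extra column to the hover tooltip.
autompg.hvplot.scatter('mpg', 'hp', hover_cols='name')
autompg.hvplot.scatter('mpg', 'hp', hover_cols=['name'])
```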
| 2020-08-24T14:05:20 |
||
holoviz/hvplot | 587 | holoviz__hvplot-587 | [
"491"
] | 736379ee37f09ee162ac733b4b908ac3b49bae4c | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -1821,6 +1821,7 @@ def contour(self, x=None, y=None, z=None, data=None, filled=False):
levels = self.kwds.get('levels', 5)
if isinstance(levels, int):
opts['color_levels'] = levels
+ opts['clim'] = self._dim_ranges['c']
return contours(qmesh, filled=filled, levels=levels).opts(**opts)
def contourf(self, x=None, y=None, z=None, data=None):
| limits for contourf: clim
Having the same problem as https://discourse.holoviz.org/t/how-to-set-clim-for-hvplot-quadmesh/150. I didn't find the related issue, therefore raising it here.
#### ALL software version info
hvplot.util.hv_version: LooseVersion ('1.13.3')
#### Description of expected behavior and the observed behavior
holoviews.QuadMesh (also contourf) has the clims argument; but clims and clim do not work for me in hvplot, therefore the color axis limits change for each timestep when doing groupby.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import xarray as xr
rasm=xr.tutorial.load_dataset('rasm')
# clim ignored, limits change for each timestep
rasm.hvplot(kind='contourf',x='xc',y='yc',z='Tair',groupby='time', levels=40,cmap='RdBu_r',clim=(-15,15))
# works
rasm.hvplot(kind='contourf',x='xc',y='yc',z='Tair',groupby='time', levels=40,cmap='RdBu_r').opts(clim=(-15,15))
```
#### Stack traceback and/or browser JavaScript console output
#### Screenshots or screencasts of the bug in action
<img width="858" alt="image" src="https://user-images.githubusercontent.com/12237157/87934849-387c6800-ca90-11ea-85df-e26c575981e0.png">
| 2021-04-09T20:19:59 |
||
holoviz/hvplot | 601 | holoviz__hvplot-601 | [
"600"
] | 73bbf1c181265f4cdc60040ab33b59adb4fe74e7 | diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -6,6 +6,7 @@
import sys
from distutils.version import LooseVersion
+from functools import wraps
from types import FunctionType
import pandas as pd
@@ -24,6 +25,7 @@
def with_hv_extension(func, extension='bokeh', logo=False):
"""If hv.extension is not loaded, load before calling function"""
+ @wraps(func)
def wrapper(*args, **kwargs):
if extension and not getattr(hv.extension, '_loaded', False):
hv.extension(extension, logo=logo)
| `hvplot.util.with_hv_extension` hides docstrings
#### ALL software version info
* hvplot = 0.7.0
However looking at the source code this issue should still be present in MASTER.
#### Description of expected behavior and the observed behavior
Functions wrapped with the `hvplot.util.with_hv_extension` decorator have their docstrings hidden by the `wrapper` docstring.
[`functools.wraps`](https://docs.python.org/3/library/functools.html#functools.wraps) can fix this.
#### Complete, minimal, self-contained example code that reproduces the issue
Example
```Python
>>> import hvplot
>>> help(hvplot.scatter_matrix)
```
Result:
```
Help on function wrapper in module hvplot.util:
wrapper(*args, **kwargs)
```
This is the docstring from the `wrapper` function inside `hvplot.util.with_hv_extension`.
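A minimal sketch of the fix, mirroring the patch above: decorating the inner wrapper with `functools.wraps` preserves the wrapped function's metadata, so `help()` shows the original docstring again.
```python
from functools import wraps

def with_hv_extension(func, extension='bokeh', logo=False):
    @wraps(func)  # copy __name__, __doc__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        # ... load the holoviews extension here if it is not loaded yet ...
        return func(*args, **kwargs)
    return wrapper
```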
| 2021-04-27T03:30:03 |
||
holoviz/hvplot | 602 | holoviz__hvplot-602 | [
"599"
] | cccc1cebd496f542404cf89798970e655dcb5754 | diff --git a/hvplot/plotting/scatter_matrix.py b/hvplot/plotting/scatter_matrix.py
--- a/hvplot/plotting/scatter_matrix.py
+++ b/hvplot/plotting/scatter_matrix.py
@@ -7,25 +7,52 @@
@with_hv_extension
-def scatter_matrix(data, c=None, chart='scatter', diagonal='hist', alpha=0.5, **kwds):
+def scatter_matrix(data, c=None, chart='scatter', diagonal='hist',
+ alpha=0.5, nonselection_alpha=0.1,
+ tools=None, cmap=None, colormap=None,
+ diagonal_kwds=None, hist_kwds=None, density_kwds=None,
+ **kwds):
"""
Scatter matrix of numeric columns.
+ A scatter_matrix shows all the pairwise relationships between the columns.
+ Each non-diagonal plots the corresponding columns against each other,
+ while the diagonal plot shows the distribution of each individual column.
+
+ This function is closely modelled on :func:`pandas.plotting.scatter_matrix`.
+
Parameters:
-----------
data: DataFrame
+ The data to plot. Every column is compared to every other column.
c: str, optional
Column to color by
chart: str, optional
- Chart type (one of 'scatter', 'bivariate', 'hexbin')
+ Chart type for the off-diagonal plots (one of 'scatter', 'bivariate', 'hexbin')
diagonal: str, optional
- Chart type for the diagonal (one of 'hist', 'kde')
- kwds: hvplot.scatter options, optional
+ Chart type for the diagonal plots (one of 'hist', 'kde')
+ alpha: float, optional
+ Transparency level for the off-diagonal plots
+ nonselection_alpha: float, optional
+ Transparency level for nonselected object in the off-diagonal plots
+ tools: str or list of str, optional
+ Interaction tools to include
+ Defaults are 'box_select' and 'lasso_select'
+ cmap/colormap: str or colormap object, optional
+ Colormap to use for off-diagonal plots
+ Default is `Category10 <https://github.com/d3/d3-3.x-api-reference/blob/master/Ordinal-Scales.md#category10>`.
+ diagonal_kwds/hist_kwds/density_kwds: dict, optional
+ Keyword options for the diagonal plots
+ kwds: Keyword options for the off-diagonal plots, optional
Returns:
--------
obj : HoloViews object
The HoloViews representation of the plot.
+
+ See Also
+ --------
+ :func:`pandas.plotting.scatter_matrix` : Equivalent pandas function.
"""
data = _hv.Dataset(data)
supported = list(HoloViewsConverter._kind_mapping)
@@ -38,9 +65,12 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist', alpha=0.5, **
diagonal = HoloViewsConverter._kind_mapping[diagonal]
chart = HoloViewsConverter._kind_mapping[chart]
- colors = _hv.plotting.util.process_cmap('Category10', categorical=True)
- chart_opts = dict(alpha=alpha, cmap=colors, tools=['box_select', 'lasso_select'],
- nonselection_alpha=0.1, **kwds)
+ if cmap and colormap:
+ raise TypeError("Only specify one of `cmap` and `colormap`.")
+ colors = cmap or colormap or _hv.plotting.util.process_cmap('Category10', categorical=True)
+ tools = tools or ['box_select', 'lasso_select']
+ chart_opts = dict(alpha=alpha, cmap=colors, tools=tools,
+ nonselection_alpha=nonselection_alpha, **kwds)
grid = _hv.operation.gridmatrix(data, diagonal_type=diagonal, chart_type=chart)
if c:
@@ -52,5 +82,12 @@ def scatter_matrix(data, c=None, chart='scatter', diagonal='hist', alpha=0.5, **
grid = (grid * groups).map(lambda x: x.get(0) if isinstance(x.get(0), chart) else x.get(1),
_hv.Overlay)
- diagonal_opts = {'fill_color': _hv.Cycle(values=colors)}
+ if (diagonal_kwds and hist_kwds) or \
+ (diagonal_kwds and density_kwds) or \
+ (hist_kwds and density_kwds):
+ raise TypeError('Specify at most one of `diagonal_kwds`, `hist_kwds`, or '
+ '`density_kwds`.')
+
+ diagonal_kwds = diagonal_kwds or hist_kwds or density_kwds or {}
+ diagonal_opts = dict(fill_color=_hv.Cycle(values=colors), **diagonal_kwds)
return grid.options({chart.__name__: chart_opts, diagonal.__name__: diagonal_opts})
| Allow specifying keyword arguments for diagonal
#### Is your feature request related to a problem? Please describe.
Although it is possible to provide keyword arguments that are passed to off-diagonal (by default scatter) plots, it isn't possible to do the same with the diagonal (by default histogram) plots. This makes it hard to set things like the bin size for the histograms.
#### Describe the solution you'd like
An argument for the function, taking a dict, which is set as the keyword arguments for the diagonal plots. Perhaps `diagonal_kwds` or `diag_kwds`. So, for example, `hvplot.scatter_matrix(data, diag_kwds={'bins': 100})`.
#### Describe alternatives you've considered
Manually specifying the options to the grid is possible, but cumbersome since it requires calculating which grid elements are the diagonals.
#### Additional context
The pandas `scatter_matrix` provides separate arguments to specify options for the diagonal and off-diagonal plots. It uses `hist_kwds` and `density_kwds` for 'hist' and 'kde' plots, respectively. This seems redundant to me since you can only ever use one or the other.
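For illustration, a minimal sketch of the requested call, assuming the new `diagonal_kwds` argument forwards style options (such as `fill_alpha`) to the diagonal histograms; the exact set of supported keywords depends on the backend:
```python
import numpy as np
import pandas as pd
import hvplot  # provides hvplot.scatter_matrix

df = pd.DataFrame(np.random.randn(200, 4), columns=list("ABCD"))

# Off-diagonal scatter options still go through **kwds;
# diagonal_kwds is forwarded to the diagonal (histogram) plots.
hvplot.scatter_matrix(df, alpha=0.4, diagonal_kwds={'fill_alpha': 0.3})
```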
| 2021-04-27T03:39:13 |
||
holoviz/hvplot | 659 | holoviz__hvplot-659 | [
"656"
] | 5c41b81b0c4d16d6ea58c6f775cee052cc9c5d93 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -498,6 +498,7 @@ def __init__(self, data, x, y, kind=None, by=None, use_index=True,
plot_opts['global_extent'] = global_extent
if projection:
plot_opts['projection'] = process_crs(projection)
+ title = title if title is not None else getattr(self, '_title', None)
if title is not None:
plot_opts['title'] = title
if (self.kind in self._colorbar_types or self.rasterize or self.datashade or self._color_dim):
@@ -654,6 +655,9 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
elif is_xarray(data):
import xarray as xr
z = kwds.get('z')
+ if isinstance(data, xr.Dataset):
+ if len(data.data_vars) == 0:
+ raise ValueError("Cannot plot an empty xarray.Dataset object.")
if z is None:
if isinstance(data, xr.Dataset):
z = list(data.data_vars)[0]
@@ -703,6 +707,15 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
if groupby:
groupby = [g for g in groupby if g not in grid]
+
+ # Add a title to hvplot.xarray plots that displays scalar coords values,
+ # as done by xarray.plot()
+ if not groupby and not grid:
+ if isinstance(da, xr.DataArray):
+ self._title = da._title_for_slice()
+ elif isinstance(da, xr.Dataset):
+ self._title = partial(xr.DataArray._title_for_slice, da)()
+
self.data = data
else:
raise ValueError('Supplied data type %s not understood' % type(data).__name__)
| diff --git a/hvplot/tests/testoptions.py b/hvplot/tests/testoptions.py
--- a/hvplot/tests/testoptions.py
+++ b/hvplot/tests/testoptions.py
@@ -1,5 +1,7 @@
from unittest import SkipTest, expectedFailure
+import numpy as np
+
from parameterized import parameterized
from holoviews import Store
@@ -298,3 +300,87 @@ def test_kde_opts(self):
self.assertEqual(opts.kwargs['bandwidth'], 0.2)
self.assertEqual(opts.kwargs['cut'], 1)
self.assertEqual(opts.kwargs['filled'], True)
+
+class TestXarrayTitle(ComparisonTestCase):
+
+ def setUp(self):
+ try:
+ import xarray as xr
+ except:
+ raise SkipTest('Xarray not available')
+ self.backend = 'bokeh'
+ hv.extension(self.backend)
+ Store.current_backend = self.backend
+ self.store_copy = OptionTree(sorted(Store.options().items()),
+ groups=Options._option_groups)
+ import hvplot.xarray # noqa
+ self.da = xr.DataArray(
+ data=np.arange(16).reshape((2, 2, 2, 2)),
+ coords={'time': [0, 1], 'y': [0, 1], 'x': [0, 1], 'band': [0, 1]},
+ dims=['time', 'y', 'x', 'band'],
+ name='test',
+ )
+ da2 = xr.DataArray(
+ data=np.arange(27).reshape((3, 3, 3)),
+ coords={'y': [0, 1, 2], 'x': [0, 1, 2]},
+ dims=['y', 'x', 'other'],
+ name='test2'
+ )
+ self.ds1 = xr.Dataset(dict(foo=self.da))
+ self.ds2 = xr.Dataset(dict(foo=self.da, bar=da2))
+
+ def tearDown(self):
+ Store.options(val=self.store_copy)
+ Store._custom_options = {k:{} for k in Store._custom_options.keys()}
+ super(TestXarrayTitle, self).tearDown()
+
+ def test_dataarray_2d_with_title(self):
+ da_sel = self.da.sel(time=0, band=0)
+ plot = da_sel.hvplot() # Image plot
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'time = 0, band = 0')
+
+ def test_dataarray_1d_with_title(self):
+ da_sel = self.da.sel(time=0, band=0, x=0)
+ plot = da_sel.hvplot() # Line plot
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'time = 0, x = 0, band = 0')
+
+ def test_dataarray_1d_and_by_with_title(self):
+ da_sel = self.da.sel(time=0, band=0, x=[0, 1])
+ plot = da_sel.hvplot(by='x') # Line plot with hue/by
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'time = 0, band = 0')
+
+ def test_override_title(self):
+ da_sel = self.da.sel(time=0, band=0)
+ plot = da_sel.hvplot(title='title') # Imege plot
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'title')
+
+ def test_dataarray_4d_line_no_title(self):
+ plot = self.da.hvplot.line(dynamic=False) # Line plot with widgets
+ opts = Store.lookup_options('bokeh', plot.last, 'plot')
+ self.assertNotIn('title', opts.kwargs)
+
+ def test_dataarray_3d_histogram_with_title(self):
+ da_sel = self.da.sel(time=0)
+ plot = da_sel.hvplot() # Histogram and no widgets
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'time = 0')
+
+ def test_dataset_empty_raises(self):
+ with self.assertRaisesRegex(ValueError, 'empty xarray.Dataset'):
+ self.ds1.drop('foo').hvplot()
+
+ def test_dataset_one_var_behaves_like_dataarray(self):
+ ds_sel = self.ds1.sel(time=0, band=0)
+ plot = ds_sel.hvplot() # Image plot
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'time = 0, band = 0')
+
+ def test_dataset_scatter_with_title(self):
+ ds_sel = self.ds2.sel(time=0, band=0, x=0, y=0)
+ plot = ds_sel.hvplot.scatter(x='foo', y='bar') # Image plot
+ opts = Store.lookup_options('bokeh', plot, 'plot')
+ self.assertEqual(opts.kwargs['title'], 'y = 0, x = 0, time = 0, band = 0')
| move scalar coords to title in hvplot.xarray
#### Is your feature request related to a problem? Please describe.
Upstreaming xarray .plot() methods to hvplot.xarray
#### Describe the solution you'd like
an xarray.Dataset with one data variable:
`ds["data_var"].plot()`
to match
`ds,hvplot()`
#### Describe alternatives you've considered
#### Additional context
```
import xarray as xr
import hvplot.xarray
ds = xr.tutorial.open_dataset("air_temperature").isel(lat=0, lon=0)
ds["air"].plot()
```
<img width="425" alt="Screen Shot 2021-09-06 at 10 40 38 PM" src="https://user-images.githubusercontent.com/17162724/132275954-827286c0-81c9-4cd1-943a-cfe6f393905e.png">
```
ds.hvplot()
```
<img width="721" alt="Screen Shot 2021-09-06 at 10 41 41 PM" src="https://user-images.githubusercontent.com/17162724/132276030-07e2cd2a-8019-4079-bfab-78a1ead377de.png">
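For illustration, a possible stop-gap until this is built in is to lean on the (private) xarray helper that `.plot()` uses to build that title; a minimal sketch, reusing `ds` from the snippet above:
```python
da = ds["air"]  # scalar lat/lon coords after the isel above
ds.hvplot(title=da._title_for_slice())  # e.g. "lat = 75.0, lon = 200.0"
```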
| Hi @raybellwaves!
Thanks for raising this, hvplot would indeed benefit from being more coherent here with the output obtained from xarray's plot API. I'll address it soon. | 2021-09-10T14:00:39 |
holoviz/hvplot | 693 | holoviz__hvplot-693 | [
"692"
] | 25f3285c8ac315e96929b57504e4674d40b947b5 | diff --git a/hvplot/sample_data.py b/hvplot/sample_data.py
--- a/hvplot/sample_data.py
+++ b/hvplot/sample_data.py
@@ -6,10 +6,18 @@
try:
from intake import open_catalog
+ import intake_parquet # noqa
+ import intake_xarray # noqa
+ import s3fs # noqa
except:
- raise ImportError('Loading hvPlot sample data requires intake '
- 'and intake-parquet. Install it using conda or '
- 'pip before loading data.')
+ raise ImportError(
+ """Loading hvPlot sample data requires:
+ * intake
+ * intake-parquet
+ * intake-xarray
+ * s3fs
+ Install these using conda or pip before loading data."""
+ )
_file_path = os.path.dirname(__file__)
if os.path.isdir(os.path.join(_file_path, 'examples')):
| sample_data try/except import wrapper fails
#### ALL software version info
hvplot: 0.7.3
#### Description of expected behavior and the observed behavior
The following import fails, despite the all-catching `except` in the code?? (Honestly stumped)
```python
from hvplot.sample_data import us_crime, airline_flights
```
```python
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_3185062/1788543639.py in <module>
----> 1 from hvplot.sample_data import us_crime, airline_flights
~/miniconda3/envs/py39/lib/python3.9/site-packages/hvplot/sample_data.py in <module>
23 # Add catalogue entries to namespace
24 for _c in catalogue:
---> 25 globals()[_c] = catalogue[_c]
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/base.py in __getitem__(self, key)
398 if e.container == 'catalog':
399 return e(name=key)
--> 400 return e()
401 if isinstance(key, str) and '.' in key:
402 key = key.split('.')
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/entry.py in __call__(self, persist, **kwargs)
75 raise ValueError('Persist value (%s) not understood' % persist)
76 persist = persist or self._pmode
---> 77 s = self.get(**kwargs)
78 if persist != 'never' and isinstance(s, PersistMixin) and s.has_been_persisted:
79 from ..container.persist import store
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/local.py in get(self, **user_parameters)
287 return self._default_source
288
--> 289 plugin, open_args = self._create_open_args(user_parameters)
290 data_source = plugin(**open_args)
291 data_source.catalog_object = self._catalog
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/local.py in _create_open_args(self, user_parameters)
261
262 if len(self._plugin) == 0:
--> 263 raise ValueError('No plugins loaded for this entry: %s\n'
264 'A listing of installable plugins can be found '
265 'at https://intake.readthedocs.io/en/latest/plugin'
ValueError: No plugins loaded for this entry: parquet
A listing of installable plugins can be found at https://intake.readthedocs.io/en/latest/plugin-directory.html .
```
For reference, this is the code in 0.7.3:
```python
import os
try:
from intake import open_catalog
except:
raise ImportError('Loading hvPlot sample data requires intake '
'and intake-parquet. Install it using conda or '
'pip before loading data.')
```
How can intake throw a ValueError??
#### Complete, minimal, self-contained example code that reproduces the issue
* Have only the package `intake` installed, no other intake subpackages.
* Execute: `from hvplot.sample_data import us_crime, airline_flights`
```
# code goes here between backticks
from hvplot.sample_data import us_crime, airline_flights
```
#### Stack traceback and/or browser JavaScript console output
```python
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_3185062/1788543639.py in <module>
----> 1 from hvplot.sample_data import us_crime, airline_flights
~/miniconda3/envs/py39/lib/python3.9/site-packages/hvplot/sample_data.py in <module>
23 # Add catalogue entries to namespace
24 for _c in catalogue:
---> 25 globals()[_c] = catalogue[_c]
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/base.py in __getitem__(self, key)
398 if e.container == 'catalog':
399 return e(name=key)
--> 400 return e()
401 if isinstance(key, str) and '.' in key:
402 key = key.split('.')
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/entry.py in __call__(self, persist, **kwargs)
75 raise ValueError('Persist value (%s) not understood' % persist)
76 persist = persist or self._pmode
---> 77 s = self.get(**kwargs)
78 if persist != 'never' and isinstance(s, PersistMixin) and s.has_been_persisted:
79 from ..container.persist import store
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/local.py in get(self, **user_parameters)
287 return self._default_source
288
--> 289 plugin, open_args = self._create_open_args(user_parameters)
290 data_source = plugin(**open_args)
291 data_source.catalog_object = self._catalog
~/miniconda3/envs/py39/lib/python3.9/site-packages/intake/catalog/local.py in _create_open_args(self, user_parameters)
261
262 if len(self._plugin) == 0:
--> 263 raise ValueError('No plugins loaded for this entry: %s\n'
264 'A listing of installable plugins can be found '
265 'at https://intake.readthedocs.io/en/latest/plugin'
ValueError: No plugins loaded for this entry: parquet
A listing of installable plugins can be found at https://intake.readthedocs.io/en/latest/plugin-directory.html .
```
#### Additional info
The list of required package is now this:
* intake-parquet
* intake-xarray
* s3fs
| Ah, *doh*, it's not the import that fails, but the code after the import:
```python
24 for _c in catalogue:
---> 25 globals()[_c] = catalogue[_c]
```
I'm having a hard time reproducing this in a notebook due to the usage of `__file__`; would it be okay for a PR to use `importlib.resources` to find the path to the `datasets.yaml` file?
Maybe add `import intake_parquet` within the `try` section to be sure to raise the exception when that plugin is not installed?
Yes, but if that trial import is acceptable (I wasn't sure about performance), then I'd add intake-xarray and s3fs as well, as those are also required. (Which makes this bug annoying, as one needs to try it 3 times before learning about all 3 missing packages... ;)
I would have thought those would be recursive subdependencies, but if not, then yes, import all those in the `try` block as well. To make it fail more quickly when it will fail, the first import should be the one most likely to fail (i.e. least likely to be installed in a typical environment), which I'd guess here would be `intake_parquet`.
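A sketch of that suggestion is below; the final patch in this PR imports the same packages inside the `try` block, though it keeps `open_catalog` first, so treat the ordering here as illustrative only.
```python
try:
    import intake_parquet  # noqa -- most likely to be missing, so fail fast
    import intake_xarray   # noqa
    import s3fs            # noqa
    from intake import open_catalog
except ImportError:
    raise ImportError(
        "Loading hvPlot sample data requires intake, intake-parquet, "
        "intake-xarray and s3fs. Install them with conda or pip first."
    )
```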
They seem to be independent packages:
```bash
❯ mamba info intake-xarray=0.5
intake-xarray 0.5.0 pyhd8ed1ab_0
--------------------------------
file name : intake-xarray-0.5.0-pyhd8ed1ab_0.tar.bz2
name : intake-xarray
version : 0.5.0
build string: pyhd8ed1ab_0
build number: 0
channel : https://conda.anaconda.org/conda-forge/noarch
size : 1.4 MB
arch : None
constrains : ()
license : BSD-2-Clause
license_family: BSD
md5 : 43d9d1c90da0b2b28cc16e58a52a0f2b
noarch : python
package_type: noarch_python
platform : None
sha256 : 91a388e5eb015b192bc17de04c55b102576d1c1b08571a80a1a9a1bc6c878f91
subdir : noarch
timestamp : 1616085245631
url : https://conda.anaconda.org/conda-forge/noarch/intake-xarray-0.5.0-pyhd8ed1ab_0.tar.bz2
dependencies:
dask >=2.2
intake >=0.5.2
netcdf4
python >=3.5
xarray >=0.12.0
zarr
WARNING: 'conda info package_name' is deprecated.
Use 'conda search package_name --info'.
site-packages/hvplot/examples via 🐍 v3.9.9 via 🅒 py39 took 5s
❯ mamba search intake-parquet
Loading channels: done
# Name Version Build Channel
intake-parquet 0.2.1 py_0 conda-forge
intake-parquet 0.2.2 py_0 conda-forge
intake-parquet 0.2.3 py_0 conda-forge
site-packages/hvplot/examples via 🐍 v3.9.9 via 🅒 py39 took 5s
❯ mamba info intake-parquet=0.2.3
intake-parquet 0.2.3 py_0
-------------------------
file name : intake-parquet-0.2.3-py_0.tar.bz2
name : intake-parquet
version : 0.2.3
build string: py_0
build number: 0
channel : https://conda.anaconda.org/conda-forge/noarch
size : 10 KB
arch : None
constrains : ()
license : BSD-2-Clause
license_family: BSD
md5 : b7d04be2fb7b43946cf06dc5f7f04ad1
noarch : python
package_type: noarch_python
platform : None
sha256 : 2981d0998aa3e30713c6b2012a4557e77b70ed6e04778f9365c4fdeb593576ca
subdir : noarch
timestamp : 1573509119874
url : https://conda.anaconda.org/conda-forge/noarch/intake-parquet-0.2.3-py_0.tar.bz2
dependencies:
dask
fastparquet
intake >=0.3
jinja2
pandas
pyarrow
python >=3.5
WARNING: 'conda info package_name' is deprecated.
Use 'conda search package_name --info'.
```
and s3fs is obviously unrelated. Will play with it and then submit a PR.
Saw the same thing in #562.
I still feel like it is a lot to install just to run the second page in a [user guide](https://hvplot.holoviz.org/user_guide/Plotting.html). Why not just download the data with requests or urllib like e.g. [bokeh](https://github.com/bokeh/bokeh/blob/branch-3.0/bokeh/util/sampledata.py) does?
|
holoviz/hvplot | 699 | holoviz__hvplot-699 | [
"697"
] | 4e3c111230a78182ddb399b4d7a77e9965931289 | diff --git a/hvplot/interactive.py b/hvplot/interactive.py
--- a/hvplot/interactive.py
+++ b/hvplot/interactive.py
@@ -20,6 +20,7 @@
def _find_widgets(op):
widgets = []
op_args = list(op['args'])+list(op['kwargs'].values())
+ op_args = hv.core.util.flatten(op_args)
for op_arg in op_args:
if 'panel' in sys.modules:
if isinstance(op_arg, Widget) and op_arg not in widgets:
| pn.widgets not working in a list
Hi, I'm trying out the hvplot `.interactive`. I got `TypeError: 'RadioButtonGroup' object is not callable` for the following code; I think it's because I can't put my xaxis widget in a list. It works if I change `.groupby(['origin', xaxis])` to `.groupby(xaxis)`, but it'd be nice if `pn.widgets` could work in a list. I'd really appreciate some help. Thanks!
```
from bokeh.sampledata.autompg import autompg_clean as df
import hvplot.pandas
import panel as pn
import holoviews as hv
hv.extension('bokeh')
idf = df.interactive()
cylinders = pn.widgets.IntSlider(start=4, end=8, step=2)
mfr = pn.widgets.ToggleGroup(options=['ford', 'chevrolet', 'plymouth', 'honda', 'sbaru', 'volkswagen'],
value=['ford', 'chevrolet', 'plymouth', 'honda', 'sbaru', 'volkswagen'])
xaxis = pn.widgets.RadioButtonGroup(name='X axis', options=['mpg', 'yr'])
yaxis = pn.widgets.RadioButtonGroup(name='Y axis', options=['hp', 'weight'])
ipipeline = (
idf[
(idf.cyl == cylinders) &
(idf.mfr.isin(mfr))
]
.groupby(['origin', xaxis])[yaxis].mean()
.to_frame()
.reset_index()
)
```

<details>
<summary>Traceback</summary>
```python-traceback
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/var/folders/n9/dr352bs550nck1fg2mz9vpn80000gn/T/ipykernel_46537/1739249479.py in <module>
12
13 ipipeline = (
---> 14 idf[
15 (idf.cyl == cylinders) &
16 (idf.mfr.isin(mfr))
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/hvplot/interactive.py in __call__(self, *args, **kwargs)
185 accessor=True)
186 kwargs = dict(self._inherit_kwargs, **kwargs)
--> 187 clone = self._clone(method(*args, **kwargs), plot=self._method == 'plot')
188 finally:
189 self._method = None
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/hvplot/interactive.py in _clone(self, transform, plot, loc, center, dmap, **kwargs)
114 depth = self._depth+1
115 kwargs = dict(self._inherit_kwargs, **dict(self._kwargs, **kwargs))
--> 116 return type(self)(self._obj, transform, plot, depth,
117 loc, center, dmap, **kwargs)
118
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/hvplot/interactive.py in __init__(self, obj, transform, plot, depth, loc, center, dmap, inherit_kwargs, max_rows, **kwargs)
84 self._kwargs = kwargs
85 ds = hv.Dataset(self._obj)
---> 86 self._current = self._transform.apply(ds, keep_index=True, compute=False)
87 self._init = True
88
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/holoviews/util/transform.py in apply(self, dataset, flat, expanded, ranges, all_values, keep_index, compute, strict)
736 drange = ranges.get(eldim, {})
737 drange = drange.get('combined', drange)
--> 738 data = self._apply_fn(dataset, data, fn, fn_name, args,
739 kwargs, accessor, drange)
740 drop_index = keep_index_for_compute and not keep_index
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/holoviews/util/transform.py in _apply_fn(self, dataset, data, fn, fn_name, args, kwargs, accessor, drange)
634 data = method(*args, **kwargs)
635 else:
--> 636 raise e
637 else:
638 data = fn(*args, **kwargs)
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/holoviews/util/transform.py in _apply_fn(self, dataset, data, fn, fn_name, args, kwargs, accessor, drange)
628 else:
629 try:
--> 630 data = method(*args, **kwargs)
631 except Exception as e:
632 if 'axis' in kwargs:
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/frame.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, dropna)
7624 # error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type
7625 # "Union[bool, NoDefault]"; expected "bool"
-> 7626 return DataFrameGroupBy(
7627 obj=self,
7628 keys=by,
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/groupby/groupby.py in __init__(self, obj, keys, axis, level, grouper, exclusions, selection, as_index, sort, group_keys, squeeze, observed, mutated, dropna)
886 from pandas.core.groupby.grouper import get_grouper
887
--> 888 grouper, exclusions, obj = get_grouper(
889 obj,
890 keys,
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/groupby/grouper.py in get_grouper(obj, key, axis, level, sort, observed, mutated, validate, dropna)
875 # allow us to passing the actual Grouping as the gpr
876 ping = (
--> 877 Grouping(
878 group_axis,
879 gpr,
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/groupby/grouper.py in __init__(self, index, grouper, obj, level, sort, observed, in_axis, dropna)
533 raise ValueError(f"Grouper for '{t}' not 1-dimensional")
534
--> 535 self.grouping_vector = index.map(self.grouping_vector)
536
537 if not (
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/indexes/base.py in map(self, mapper, na_action)
5505 from pandas.core.indexes.multi import MultiIndex
5506
-> 5507 new_values = self._map_values(mapper, na_action=na_action)
5508
5509 attributes = self._get_attributes_dict()
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
868
869 # mapper is a function
--> 870 new_values = map_f(values, mapper)
871
872 return new_values
/opt/anaconda3/envs/p39/lib/python3.9/site-packages/pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
TypeError: 'RadioButtonGroup' object is not callable
```
</details>
| Hi @sophiamyang .
- Could you update the code example to make it runnable? Right now imports are missing.
- Could you take a screenshot of the error? It can sometimes help understand the context of the problem and provide more assurance that there is a real, reproducible problem.
Thanks.
Thanks @MarcSkovMadsen, I updated the code example and added the error message.
So I have made a somewhat working example of this problem as far as I can see (correct me if I'm wrong).
### The example
<details>
<summary>The code</summary>
``` python
import holoviews as hv
import hvplot.pandas # noqa
import panel as pn
from bokeh.sampledata.autompg import autompg_clean as df
hv.extension("bokeh")
idf = df.interactive()
cylinders = pn.widgets.IntSlider(start=4, end=8, step=2)
mfr = pn.widgets.ToggleGroup(
options=["ford", "chevrolet", "plymouth", "honda", "sbaru", "volkswagen"],
value=["ford", "chevrolet", "plymouth", "honda", "sbaru", "volkswagen"],
)
xaxis = pn.widgets.RadioButtonGroup(name="X axis", options=["mpg", "yr"])
yaxis = pn.widgets.RadioButtonGroup(name="Y axis", options=["hp", "weight"])
ipipeline = (
idf[(idf.cyl == cylinders) & (idf.mfr.isin(mfr))]
.groupby(["origin", xaxis])[yaxis]
.mean()
.to_frame()
.sort_values(xaxis)
.reset_index()
)
pn.Column(ipipeline.hvplot(x=xaxis, y=yaxis, by="origin"), ipipeline.output(), sizing_mode="stretch_both").servable()
```
</details>
https://user-images.githubusercontent.com/19758978/149788435-c0042ebc-eac5-4511-ac4e-6f66a74fec20.mp4
### Changes
I have made this change in holoviews to get this example to work.
https://github.com/holoviz/holoviews/blob/cc6b27f01710402fdfee2aeef1507425ca78c91f/holoviews/util/transform.py#L579-L606
Where I have added the following to the if statements for positional arguments and keyword arguments, respectively:
``` python
elif isinstance(arg, (list, tuple)):
    arg = [resolve_dependent_value(a) for a in arg]
```
``` python
elif isinstance(v, (list, tuple)):
    v = [resolve_dependent_value(a) for a in v]
```
Both are done to convert the widget into an actual value.
### Problems
There are some problems I have noticed:
- The plot is not updating when changing the cylinder slider, but it updates the DataFrame.
- I can't disable "volkswagen" in the `ToggleGroup`.
Edit 1: Updated with better changes and removed a problem.
The reason the plot is not updating when I change the cylinder slider is that both it and the mfr toggle group have no name. This means that if I change their order in the ipipeline, it is the toggle group which becomes unresponsive. If I give one of the widgets a name, both of them work. This should be fixed upstream in holoviews to avoid it overwriting widgets with no name (or the same name).
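A minimal illustration of that workaround, giving the widgets distinct names so they are not collapsed into one another (the names here are arbitrary):
```python
import panel as pn

cylinders = pn.widgets.IntSlider(name="cylinders", start=4, end=8, step=2)
mfr = pn.widgets.ToggleGroup(
    name="manufacturer",
    options=["ford", "chevrolet", "plymouth", "honda", "sbaru", "volkswagen"],
    value=["ford", "chevrolet", "plymouth", "honda", "sbaru", "volkswagen"],
)
```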
Thanks so much @Hoxbro!
No problem, glad I could help - so you're able to share with the world how awesome the holoviz stack is.
As for the last problem, about not being able to disable "volkswagen": it is because an HSpacer is blocking it, and if I add `pn.extension(sizing_mode="stretch_width")` I'm able to click on it.
|
holoviz/hvplot | 714 | holoviz__hvplot-714 | [
"713"
] | fa26122e968f8fa3c4f843740e3850b97dc99994 | diff --git a/hvplot/plotting/core.py b/hvplot/plotting/core.py
--- a/hvplot/plotting/core.py
+++ b/hvplot/plotting/core.py
@@ -8,7 +8,7 @@
panel_available = False
from ..converter import HoloViewsConverter
-from ..util import process_dynamic_args
+from ..util import is_list_like, process_dynamic_args
class hvPlotBase:
@@ -33,8 +33,8 @@ def __call__(self, x=None, y=None, kind=None, **kwds):
Parameters
----------
- x, y : string, optional
- Field name in the data to draw x- and y-positions from
+ x, y : string, list, or array-like, optional
+ Field name(s) in the data to draw x- and y-positions from
kind : string, optional
The kind of plot to generate, e.g. 'line', 'scatter', etc.
**kwds : optional
@@ -44,6 +44,10 @@ def __call__(self, x=None, y=None, kind=None, **kwds):
-------
HoloViews object: Object representing the requested visualization
"""
+ # Convert an array-like to a list
+ x = list(x) if is_list_like(x) else x
+ y = list(y) if is_list_like(y) else y
+
if isinstance(kind, str) and kind not in self.__all__:
raise NotImplementedError("kind='{kind}' for data of type {type}".format(
kind=kind, type=type(self._data)))
diff --git a/hvplot/util.py b/hvplot/util.py
--- a/hvplot/util.py
+++ b/hvplot/util.py
@@ -8,6 +8,7 @@
from packaging.version import Version
from types import FunctionType
+import numpy as np
import pandas as pd
import holoviews as hv
import param
@@ -252,6 +253,21 @@ def process_crs(crs):
raise ValueError("Projection must be defined as a EPSG code, proj4 string, cartopy CRS or pyproj.Proj.")
return crs
+
+def is_list_like(obj):
+ """
+ Adapted from pandas' is_list_like cython function.
+ """
+ return (
+ # equiv: `isinstance(obj, abc.Iterable)`
+ hasattr(obj, "__iter__") and not isinstance(obj, type)
+ # we do not count strings/unicode/bytes as list-like
+ and not isinstance(obj, (str, bytes))
+ # exclude zero-dimensional numpy arrays, effectively scalars
+ and not (isinstance(obj, np.ndarray) and obj.ndim == 0)
+ )
+
+
def is_tabular(data):
if check_library(data, ['dask', 'streamz', 'pandas', 'geopandas', 'cudf']):
return True
| diff --git a/hvplot/tests/plotting/testcore.py b/hvplot/tests/plotting/testcore.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/plotting/testcore.py
@@ -0,0 +1,21 @@
+import numpy as np
+import pandas as pd
+import hvplot.pandas # noqa
+
+import pytest
+
[email protected]("y", (
+ ["A", "B", "C", "D"],
+ ("A", "B", "C", "D"),
+ {"A", "B", "C", "D"},
+ np.array(["A", "B", "C", "D"]),
+ pd.Index(["A", "B", "C", "D"]),
+ pd.Series(["A", "B", "C", "D"]),
+ ))
+def test_diffent_input_types(y):
+ df = pd._testing.makeDataFrame()
+ types = {t for t in dir(df.hvplot) if not t.startswith("_")}
+ ignore_types = {'bivariate', 'heatmap', 'hexbin', 'labels', 'vectorfield'}
+
+ for t in types - ignore_types:
+ df.hvplot(y=y, kind=t)
diff --git a/hvplot/tests/testutil.py b/hvplot/tests/testutil.py
--- a/hvplot/tests/testutil.py
+++ b/hvplot/tests/testutil.py
@@ -4,11 +4,12 @@
import sys
import numpy as np
+import pandas as pd
import pytest
from unittest import TestCase, SkipTest
-from hvplot.util import check_crs, process_xarray
+from hvplot.util import check_crs, is_list_like, process_xarray
class TestProcessXarray(TestCase):
@@ -275,3 +276,15 @@ def test_check_crs():
assert p.srs == '+proj=utm +zone=15 +datum=NAD83 +units=m +no_defs'
p = check_crs('wrong')
assert p is None
+
+
+def test_is_list_like():
+ assert not is_list_like(0)
+ assert not is_list_like('string')
+ assert not is_list_like(np.array('a'))
+ assert is_list_like(['a', 'b'])
+ assert is_list_like(('a', 'b'))
+ assert is_list_like({'a', 'b'})
+ assert is_list_like(pd.Series(['a', 'b']))
+ assert is_list_like(pd.Index(['a', 'b']))
+ assert is_list_like(np.array(['a', 'b']))
| Improve handling of arraylike as input for hvplot
#### ALL software version info
```
pandas 1.3.5
hvplot 0.8.0a11.post9+gfbcc04e
```
#### Description of expected behavior and the observed behavior
When passing an array type like `df.columns` into `hvplot` it raises `ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()`.
I think there are two solutions for this problem:
* one is to support array-like formats like `numpy` or `pandas` objects as input for `hvplot` (a sketch of this is shown after the example below),
* the other is to raise a better error message when passing in an array-like type.
#### Complete, minimal, self-contained example code that reproduces the issue
``` python
import pandas as pd
import hvplot.pandas
df = pd._testing.makeDataFrame()
df.hvplot(y=df.columns) # Raises ValueError
df.hvplot(y=list(df.columns)) # Works fine
```
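A rough sketch of the first option, normalising array-likes to plain lists before dispatching; the helper name here is illustrative (the merged change adds a similar `is_list_like` utility):
```python
import numpy as np

def _is_list_like(obj):
    # strings/bytes are iterable but should be treated as scalars,
    # and 0-dimensional numpy arrays are effectively scalars too
    return (
        hasattr(obj, "__iter__")
        and not isinstance(obj, (str, bytes, type))
        and not (isinstance(obj, np.ndarray) and obj.ndim == 0)
    )

y = df.columns
y = list(y) if _is_list_like(y) else y
df.hvplot(y=y)  # now behaves like passing an explicit list
```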
| I'd vote for automatically converting array-like formats into lists where appropriate. Not sure if it also makes sense to accept iterators or generators as well. | 2022-03-20T16:12:00 |
holoviz/hvplot | 781 | holoviz__hvplot-781 | [
"646"
] | f2f1b6c56e2a09399799ee8fa002c28b04d28e7f | diff --git a/hvplot/interactive.py b/hvplot/interactive.py
--- a/hvplot/interactive.py
+++ b/hvplot/interactive.py
@@ -5,7 +5,7 @@
import abc
import operator
import sys
-
+from functools import partial
from types import FunctionType, MethodType
import holoviews as hv
@@ -17,6 +17,7 @@
from panel.util import get_method_owner, full_groupby
from panel.widgets.base import Widget
+from .converter import HoloViewsConverter
from .util import _flatten, is_tabular, is_xarray, is_xarray_dataarray
@@ -44,7 +45,7 @@ def _find_widgets(op):
return widgets
-class Interactive():
+class Interactive:
"""
Interactive is a wrapper around a Python object that lets users create
interactive pipelines by calling existing APIs on an object with
@@ -112,6 +113,7 @@ def __init__(self, obj, transform=None, fn=None, plot=False, depth=0,
ds = hv.Dataset(self._obj)
self._current = self._transform.apply(ds, keep_index=True, compute=False)
self._init = True
+ self.hvplot = _hvplot(self)
def _update_obj(self, *args):
self._obj = self._fn.eval(self._fn.object)
@@ -127,7 +129,7 @@ def _fn_params(self):
parameterized = get_method_owner(self._fn.object)
deps = parameterized.param.method_dependencies(self._fn.object.__name__)
return deps
-
+
@property
def _params(self):
ps = self._fn_params
@@ -394,13 +396,6 @@ def get_ax():
transform = type(transform)(transform, 'plot', accessor=True)
return new._clone(transform(*args, **kwargs), plot=True)
- def hvplot(self, *args, **kwargs):
- new = self._resolve_accessor()
- transform = new._transform
- transform = type(transform)(transform, 'hvplot', accessor=True)
- dmap = 'kind' not in kwargs
- return new._clone(transform(*args, **kwargs), dmap=dmap)
-
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@@ -512,3 +507,37 @@ def widgets(self):
if w not in widgets:
widgets.append(w)
return pn.Column(*widgets)
+
+
+class _hvplot:
+ _kinds = tuple(HoloViewsConverter._kind_mapping)
+
+ __slots__ = ["_interactive"]
+
+ def __init__(self, _interactive):
+ self._interactive = _interactive
+
+ def __call__(self, *args, _kind=None, **kwargs):
+ # The underscore in _kind is to not overwrite it
+ # if 'kind' is in kwargs and the function
+ # is used with partial.
+ if _kind and "kind" in kwargs:
+ raise TypeError(f"{_kind}() got an unexpected keyword argument 'kind'")
+ if _kind:
+ kwargs["kind"] = _kind
+
+ new = self._interactive._resolve_accessor()
+ transform = new._transform
+ transform = type(transform)(transform, 'hvplot', accessor=True)
+ dmap = 'kind' not in kwargs or not isinstance(kwargs['kind'], str)
+ return new._clone(transform(*args, **kwargs), dmap=dmap)
+
+ def __getattr__(self, attr):
+ if attr in self._kinds:
+ return partial(self, _kind=attr)
+ else:
+ raise AttributeError(f"'hvplot' object has no attribute '{attr}'")
+
+ def __dir__(self):
+ # This function is for autocompletion
+ return self._interactive._obj.hvplot.__all__
| diff --git a/hvplot/tests/testinteractive.py b/hvplot/tests/testinteractive.py
--- a/hvplot/tests/testinteractive.py
+++ b/hvplot/tests/testinteractive.py
@@ -1,13 +1,21 @@
import pandas as pd
import panel as pn
+import pytest
from holoviews.util.transform import dim
-import hvplot.pandas
+import hvplot.pandas # noqa
from hvplot import bind
from hvplot.interactive import Interactive
from hvplot.xarray import XArrayInteractive
+try:
+ import xarray as xr
+ import hvplot.xarray # noqa
+except ImportError:
+ xr = None
+
+xr_available = pytest.mark.skipif(xr is None, reason="requires xarray")
def test_interactive_pandas_dataframe():
df = pd._testing.makeMixedDataFrame()
@@ -29,8 +37,8 @@ def test_interactive_pandas_series():
assert dfi._fn is None
assert dfi._transform == dim('*')
+@xr_available
def test_interactive_xarray_dataarray():
- import xarray as xr
ds = xr.tutorial.load_dataset('air_temperature')
dsi = Interactive(ds.air)
@@ -40,8 +48,8 @@ def test_interactive_xarray_dataarray():
assert dsi._fn is None
assert dsi._transform == dim('air')
+@xr_available
def test_interactive_xarray_dataset():
- import xarray as xr
ds = xr.tutorial.load_dataset('air_temperature')
dsi = Interactive(ds)
@@ -55,7 +63,7 @@ def test_interactive_pandas_function():
df = pd._testing.makeMixedDataFrame()
select = pn.widgets.Select(options=list(df.columns))
-
+
def sel_col(col):
return df[col]
@@ -68,13 +76,13 @@ def sel_col(col):
select.value = 'B'
assert dfi._obj is df.B
+@xr_available
def test_interactive_xarray_function():
- import xarray as xr
ds = xr.tutorial.load_dataset('air_temperature')
ds['air2'] = ds.air*2
select = pn.widgets.Select(options=list(ds))
-
+
def sel_col(sel):
return ds[sel]
@@ -89,6 +97,27 @@ def sel_col(sel):
assert dsi._transform == dim('air2')
+def test_interactive_pandas_dataframe_accessor():
+ df = pd._testing.makeMixedDataFrame()
+ dfi = df.interactive()
+
+ assert dfi.hvplot(kind="scatter")._transform == dfi.hvplot.scatter()._transform
+
+ with pytest.raises(TypeError):
+ dfi.hvplot.scatter(kind="area")
+
+
+@xr_available
+def test_interactive_xarray_dataset_accessor():
+ ds = xr.tutorial.load_dataset('air_temperature')
+ dsi = ds.air.interactive
+
+ assert dsi.hvplot(kind="line")._transform == dsi.hvplot.line()._transform
+
+ with pytest.raises(TypeError):
+ dsi.hvplot.line(kind="area")
+
+
def test_interactive_with_bound_function_calls():
df = pd.DataFrame({"species": [1, 1, 2, 2], "sex": 2 * ["MALE", "FEMALE"]})
| Using quadmesh with interactive
`AttributeError: 'function' object has no attribute 'quadmesh'`
```
ds.air.interactive.hvplot.quadmesh()
```
| Hmm; none of the accessors seem to work with xarray .interactive (.scatter, .points, etc.). Reproducer:
```
import xarray as xr, hvplot.xarray
ds = xr.tutorial.open_dataset('air_temperature').load()
ds.air.interactive.hvplot.points()
``` | 2022-07-18T08:08:07 |
holoviz/hvplot | 799 | holoviz__hvplot-799 | [
"787"
] | bc222fac54d289a47018ee45127f6177ff4e828b | diff --git a/hvplot/__init__.py b/hvplot/__init__.py
--- a/hvplot/__init__.py
+++ b/hvplot/__init__.py
@@ -68,6 +68,7 @@
from .converter import HoloViewsConverter
from .interactive import Interactive
+from .ui import explorer # noqa
from .utilities import hvplot_extension, output, save, show # noqa
from .plotting import (hvPlot, hvPlotTabular, # noqa
andrews_curves, lag_plot,
diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -1,3 +1,4 @@
+import holoviews as _hv
import numpy as np
import panel as pn
import param
@@ -7,17 +8,18 @@
from holoviews.plotting.util import list_cmaps
from panel.viewable import Viewer
-from .converter import HoloViewsConverter as hvConverter
+from .converter import HoloViewsConverter as _hvConverter
+from .plotting import hvPlot as _hvPlot
from .util import is_geodataframe, is_xarray
# Defaults
-DATAFRAME_KINDS = sorted(set(hvConverter._kind_mapping) - set(hvConverter._gridded_types))
-GRIDDED_KINDS = sorted(hvConverter._kind_mapping)
+DATAFRAME_KINDS = sorted(set(_hvConverter._kind_mapping) - set(_hvConverter._gridded_types))
+GRIDDED_KINDS = sorted(_hvConverter._kind_mapping)
GEOM_KINDS = ['paths', 'polygons', 'points']
STATS_KINDS = ['hist', 'kde', 'boxwhisker', 'violin', 'heatmap', 'bar', 'barh']
TWOD_KINDS = ['bivariate', 'heatmap', 'hexbin', 'labels', 'vectorfield'] + GEOM_KINDS
CMAPS = [cm for cm in list_cmaps() if not cm.endswith('_r_r')]
-DEFAULT_CMAPS = hvConverter._default_cmaps
+DEFAULT_CMAPS = _hvConverter._default_cmaps
GEO_FEATURES = [
'borders', 'coastline', 'land', 'lakes', 'ocean', 'rivers',
'states', 'grid'
@@ -27,6 +29,26 @@
MAX_ROWS = 10000
+def explorer(data, **kwargs):
+ """Explore your data by building a plot in a Panel UI component.
+
+ This function returns a Panel component that has on the right-side
+ hand a plot view and on the left-side hand a number of widgets that
+ control the plot.
+
+ Parameters
+ ----------
+ data : pandas.DataFrame
+ Data structure to explore.
+
+ Returns
+ -------
+ hvplotExporer
+ Panel component to explore a dataset.
+ """
+ return hvPlotExplorer.from_data(data, **kwargs)
+
+
class Controls(Viewer):
explorer = param.ClassSelector(class_=Viewer, precedence=-1)
@@ -81,7 +103,7 @@ def __init__(self, data, **params):
@property
def colormapped(self):
- if self.explorer.kind in hvConverter._colorbar_types:
+ if self.explorer.kind in _hvConverter._colorbar_types:
return True
return self.color is not None and self.color in self._data
@@ -89,7 +111,7 @@ def colormapped(self):
def _update_coloropts(self):
if not self.colormapped or self.cmap not in list(DEFAULT_CMAPS.values()):
return
- if self.explorer.kind in hvConverter._colorbar_types:
+ if self.explorer.kind in _hvConverter._colorbar_types:
key = 'diverging' if self.symmetric else 'linear'
self.colorbar = True
elif self.color in self._data:
@@ -112,7 +134,7 @@ class Style(Controls):
class Axes(Controls):
- legend = param.Selector(default='right', objects=hvConverter._legend_positions)
+ legend = param.Selector(default='right', objects=_hvConverter._legend_positions)
logx = param.Boolean(default=False)
@@ -278,23 +300,29 @@ class hvPlotExplorer(Viewer):
groupby = param.ListSelector(default=[])
+ # Controls that will show up as new tabs, must be ClassSelector
+
axes = param.ClassSelector(class_=Axes)
colormapping = param.ClassSelector(class_=Colormapping)
labels = param.ClassSelector(class_=Labels)
- geo = param.ClassSelector(class_=Geo)
+ # Hide the geo tab until it's better supported
+ # geo = param.ClassSelector(class_=Geo)
operations = param.ClassSelector(class_=Operations)
style = param.ClassSelector(class_=Style)
- def __new__(cls, data, **params):
+ @classmethod
+ def from_data(cls, data, **params):
if is_geodataframe(data):
- cls = hvGeomExplorer
+ # cls = hvGeomExplorer
+ raise TypeError('GeoDataFrame objects not yet supported.')
elif is_xarray(data):
- cls = hvGridExplorer
+ # cls = hvGridExplorer
+ raise TypeError('Xarray objects not yet supported.')
else:
cls = hvDataFrameExplorer
return cls(data, **params)
@@ -306,11 +334,19 @@ def __init__(self, df, **params):
x, y = params.get('x'), params.get('y')
if 'y' in params:
params['y_multi'] = params.pop('y') if isinstance(params['y'], list) else [params['y']]
- converter = hvConverter(
- df, x, y, **{k: v for k, v in params.items() if k not in ('x', 'y', 'y_multi')}
+ converter = _hvConverter(
+ df, x, y,
+ **{k: v for k, v in params.items() if k not in ('x', 'y', 'y_multi')}
)
controller_params = {}
- for cls in param.concrete_descendents(Controls).values():
+ # Assumes the controls aren't passed on instantiation.
+ controls = [
+ p.class_
+ for p in self.param.params().values()
+ if isinstance(p, param.ClassSelector)
+ and issubclass(p.class_, Controls)
+ ]
+ for cls in controls:
controller_params[cls] = {
k: params.pop(k) for k, v in dict(params).items()
if k in cls.param
@@ -320,7 +356,7 @@ def __init__(self, df, **params):
self._converter = converter
self._controls = pn.Param(
self.param, parameters=['kind', 'x', 'y', 'by', 'groupby'],
- sizing_mode='stretch_width', max_width=300
+ sizing_mode='stretch_width', max_width=300, show_name=False,
)
self.param.watch(self._toggle_controls, 'kind')
self.param.watch(self._check_y, 'y_multi')
@@ -329,13 +365,13 @@ def __init__(self, df, **params):
self._tabs = pn.Tabs(
tabs_location='left', width=400
)
- controllers = {
+ self._controllers = {
cls.name.lower(): cls(df, explorer=self, **params)
for cls, params in controller_params.items()
}
- self.param.set_param(**controllers)
+ self.param.set_param(**self._controllers)
self.param.watch(self._plot, list(self.param))
- for controller in controllers.values():
+ for controller in self._controllers.values():
controller.param.watch(self._plot, list(controller.param))
self._alert = pn.pane.Alert(
alert_type='danger', visible=False, sizing_mode='stretch_width'
@@ -350,7 +386,8 @@ def __init__(self, df, **params):
pn.layout.HSpacer(),
sizing_mode='stretch_both'
)
- self.param.trigger('kind')
+ self._toggle_controls()
+ self._plot()
def _populate(self):
variables = self._converter.variables
@@ -366,12 +403,11 @@ def _populate(self):
else:
p.objects = variables_no_index
- # Setting the default value
- if pname == "x" or pname == "y":
+ # Setting the default value if not set
+ if (pname == "x" or pname == "y") and getattr(self, pname, None) is None:
setattr(self, pname, p.objects[0])
def _plot(self, *events):
- self._layout.loading = True
y = self.y_multi if 'y_multi' in self._controls.parameters else self.y
if isinstance(y, list) and len(y) == 1:
y = y[0]
@@ -390,14 +426,15 @@ def _plot(self, *events):
df = self._data
if len(df) > MAX_ROWS and not (self.kind in STATS_KINDS or kwargs.get('rasterize') or kwargs.get('datashade')):
df = df.sample(n=MAX_ROWS)
+ self._layout.loading = True
try:
- plot = df.hvplot(
+ self._hvplot = _hvPlot(df)(
kind=self.kind, x=self.x, y=y, by=self.by, groupby=self.groupby, **kwargs
)
- hvplot = pn.pane.HoloViews(
- plot, sizing_mode='stretch_width', margin=(0, 20, 0, 20)
+ self._hvpane = pn.pane.HoloViews(
+ self._hvplot, sizing_mode='stretch_width', margin=(0, 20, 0, 20)
).layout
- self._layout[1][1] = hvplot
+ self._layout[1][1] = self._hvpane
self._alert.visible = False
except Exception as e:
self._alert.param.set_param(
@@ -413,15 +450,15 @@ def _single_y(self):
return True
return False
- def _toggle_controls(self, event):
+ def _toggle_controls(self, event=None):
# Control high-level parameters
visible = True
- if event.new in ('table', 'dataset'):
+ if event and event.new in ('table', 'dataset'):
parameters = ['kind', 'columns']
visible = False
- elif event.new in TWOD_KINDS:
+ elif event and event.new in TWOD_KINDS:
parameters = ['kind', 'x', 'y', 'by', 'groupby']
- elif event.new in ('hist', 'kde', 'density'):
+ elif event and event.new in ('hist', 'kde', 'density'):
self.x = None
parameters = ['kind', 'y_multi', 'by', 'groupby']
else:
@@ -435,15 +472,15 @@ def _toggle_controls(self, event):
('Axes', pn.Param(self.axes, widgets={
'xlim': {'throttled': True},
'ylim': {'throttled': True}
- })),
+ }, show_name=False)),
('Labels', pn.Param(self.labels, widgets={
'rot': {'throttled': True}
- })),
+ }, show_name=False)),
('Style', self.style),
('Operations', self.operations),
- ('Geo', self.geo)
+ # ('Geo', self.geo)
]
- if event.new not in ('area', 'kde', 'line', 'ohlc', 'rgb', 'step'):
+ if event and event.new not in ('area', 'kde', 'line', 'ohlc', 'rgb', 'step'):
tabs.insert(5, ('Colormapping', self.colormapping))
self._tabs[:] = tabs
@@ -455,14 +492,79 @@ def _check_by(self, event):
if event.new and 'y_multi' in self._controls.parameters and self.y_multi and len(self.y_multi) > 1:
self.by = []
+ #----------------------------------------------------------------
+ # Public API
+ #----------------------------------------------------------------
+
+ def hvplot(self):
+ """Return the plot as a HoloViews object.
+ """
+ return self._hvplot.clone()
+
+ def plot_code(self, var_name='df'):
+ """Return a string representation that can be easily copy-pasted
+ in a notebook cell to create a plot from a call to the `.hvplot`
+ accessor, and that includes all the customized settings of the explorer.
+
+ >>> hvexplorer.plot_code(var_name='data')
+ "data.hvplot(x='time', y='value')"
+
+ Parameters
+ ----------
+ var_name: string
+ Data variable name by which the returned string will start.
+ """
+ settings = self.settings()
+ args = ''
+ if settings:
+ for k, v in settings.items():
+ args += f'{k}={v!r}, '
+ args = args[:-2]
+ return f'{var_name}.hvplot({args})'
+
+ def save(self, filename, **kwargs):
+ """Save the plot to file.
+
+ Calls the `holoviews.save` utility, refer to its documentation
+ for a full description of the available kwargs.
+
+ Parameters
+ ----------
+ filename: string, pathlib.Path or IO object
+ The path or BytesIO/StringIO object to save to
+ """
+ _hv.save(self._hvplot, filename, **kwargs)
+
+ def settings(self):
+ """Return a dictionary of the customized settings.
+
+ This dictionary can be reused as an unpacked input to the explorer or
+ a call to the `.hvplot` accessor.
+
+ >>> hvplot.explorer(df, **settings)
+ >>> df.hvplot(**settings)
+ """
+ settings = {}
+ for controller in self._controllers.values():
+ params = set(controller.param) - set(['name', 'explorer'])
+ for p in params:
+ value = getattr(controller, p)
+ if value != controller.param[p].default:
+ settings[p] = value
+ for p in self._controls.parameters:
+ value = getattr(self, p)
+ if value != self.param[p].default:
+ settings[p] = value
+ if 'y_multi' in settings:
+ settings['y'] = settings.pop('y_multi')
+ settings = {k: v for k, v in sorted(list(settings.items()))}
+ return settings
+
class hvGeomExplorer(hvPlotExplorer):
kind = param.Selector(default=None, objects=sorted(GEOM_KINDS))
- def __new__(cls, data, **params):
- return super(hvPlotExplorer, cls).__new__(cls)
-
@property
def _single_y(self):
return True
@@ -488,9 +590,6 @@ class hvGridExplorer(hvPlotExplorer):
kind = param.Selector(default=None, objects=sorted(GRIDDED_KINDS))
- def __new__(cls, data, **params):
- return super(hvPlotExplorer, cls).__new__(cls)
-
@property
def _x(self):
return (self._converter.x or self._converter.indexes[0]) if self.x is None else self.x
@@ -527,9 +626,6 @@ class hvDataFrameExplorer(hvPlotExplorer):
kind = param.Selector(default='line', objects=sorted(DATAFRAME_KINDS))
- def __new__(cls, data, **params):
- return super(hvPlotExplorer, cls).__new__(cls)
-
@property
def xcat(self):
if self.kind in ('bar', 'box', 'violin'):
| diff --git a/hvplot/tests/testui.py b/hvplot/tests/testui.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/testui.py
@@ -0,0 +1,97 @@
+import holoviews as hv
+import hvplot.pandas
+import pytest
+
+try:
+ from bokeh.sampledata import penguins
+except ImportError:
+ penguins = None
+
+from hvplot.ui import hvDataFrameExplorer
+
+pytestmark = pytest.mark.skipif(
+ penguins is None,
+ reason='Penguins dataset not available on Python 3.6',
+)
+
+df = penguins.data if penguins is not None else None
+
+
+def test_explorer_basic():
+ explorer = hvplot.explorer(df)
+
+ assert isinstance(explorer, hvDataFrameExplorer)
+ assert explorer.kind == 'line'
+ assert explorer.x == 'index'
+ assert explorer.y == 'species'
+
+
+def test_explorer_settings():
+ explorer = hvplot.explorer(df)
+
+ explorer.param.set_param(
+ kind='scatter',
+ x='bill_length_mm',
+ y_multi=['bill_depth_mm'],
+ by=['species'],
+ )
+
+ settings = explorer.settings()
+
+ assert settings == dict(
+ by=['species'],
+ kind='scatter',
+ x='bill_length_mm',
+ y=['bill_depth_mm'],
+ )
+
+
+def test_explorer_plot_code():
+ explorer = hvplot.explorer(df)
+
+ explorer.param.set_param(
+ kind='scatter',
+ x='bill_length_mm',
+ y_multi=['bill_depth_mm'],
+ by=['species'],
+ )
+
+ hvplot_code = explorer.plot_code()
+
+ assert hvplot_code == "df.hvplot(by=['species'], kind='scatter', x='bill_length_mm', y=['bill_depth_mm'])"
+
+ hvplot_code = explorer.plot_code(var_name='othername')
+
+ assert hvplot_code == "othername.hvplot(by=['species'], kind='scatter', x='bill_length_mm', y=['bill_depth_mm'])"
+
+
+def test_explorer_hvplot():
+ explorer = hvplot.explorer(df)
+
+ explorer.param.set_param(
+ kind='scatter',
+ x='bill_length_mm',
+ y_multi=['bill_depth_mm'],
+ )
+
+ plot = explorer.hvplot()
+
+ assert isinstance(plot, hv.Scatter)
+ assert plot.kdims[0].name == 'bill_length_mm'
+ assert plot.vdims[0].name == 'bill_depth_mm'
+
+
+def test_explorer_save(tmp_path):
+ explorer = hvplot.explorer(df)
+
+ explorer.param.set_param(
+ kind='scatter',
+ x='bill_length_mm',
+ y_multi=['bill_depth_mm'],
+ )
+
+ outfile = tmp_path / 'plot.html'
+
+ explorer.save(outfile)
+
+ assert outfile.exists()
| Add options for saving hvPlotExplorer settings and only show the plot
Maybe I have overlooked something, but it would be nice to be able to save the settings from `hvPlotExplorer` so that the plot can easily be recreated in another session.
I'm thinking about something like this:
``` python
## First input cell in a notebook
import hvplot.ui
import hvplot.pandas
import panel as pn
pn.extension()
from bokeh.sampledata import penguins
df = penguins.data
dfe = hvplot.ui.hvPlotExplorer(df)
## First output cell
# Play around with the Explorer until you have the plot you want
## Second input cell
dfe_settings = dfe.save() # Returns a dict with the settings, which could be saved to a JSON file.
## Third input cell or another notebook where you have loaded the settings
# Load the settings into a new hvPlotExplorer
dfe2 = hvplot.ui.hvPlotExplorer.load(df, dfe_settings)
# or
dfe2 = hvplot.ui.hvPlotExplorer(df, settings=dfe_settings)
```
Here, a natural thing would be to show only the plot, because all the settings have already been determined, perhaps with a function call like `dfe2.output()` or `dfe2.plot()`.
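A rough sketch of what this can look like with the methods added in the patch above (`settings()` returns a plain dict of the customised options, `plot_code()` a copy-pasteable snippet, and `save()` writes the current plot to a file), reusing the `df` and imports from the snippet at the top:
```python
dfe = hvplot.explorer(df)
# ...adjust the plot via the widgets...

opts = dfe.settings()        # e.g. {'kind': 'scatter', 'x': 'bill_length_mm', ...}
print(dfe.plot_code("df"))   # "df.hvplot(kind='scatter', x='bill_length_mm', ...)"
dfe.save("plot.html")        # persist the current plot

df.hvplot(**opts)            # or reuse the settings directly on the data
```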
| Yes, definitely! To demo in the docs I think you'd use code like:
```python
# First notebook cell
import json, os

settings = {}
if os.path.exists("settings.json"):
    with open("settings.json") as f:
        settings = json.load(f)
dfe = hvplot.ui.hvPlotExplorer(df, **settings)

# Second notebook cell
with open("settings.json", 'w') as f:
    json.dump(settings, f)
```
That way the notebook will run properly the first time and when used with Run All, but when run cell-by-cell it will save the settings selected interactively for use next time.
| 2022-07-26T21:53:20 |
holoviz/hvplot | 811 | holoviz__hvplot-811 | [
"785"
] | 0311cdcdc89d836be58c1320dbf97d62a1fa6b30 | diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -348,17 +348,25 @@ def __init__(self, df, **params):
pn.layout.HSpacer(),
sizing_mode='stretch_both'
)
- self._plot()
self.param.trigger('kind')
def _populate(self):
variables = self._converter.variables
+ indexes = getattr(self._converter, "indexes", [])
+ variables_no_index = [v for v in variables if v not in indexes]
for pname in self.param:
if pname == 'kind':
continue
p = self.param[pname]
if isinstance(p, param.Selector):
- p.objects = variables
+ if pname == "x":
+ p.objects = variables
+ else:
+ p.objects = variables_no_index
+
+ # Setting the default value
+ if pname == "x" or pname == "y":
+ setattr(self, pname, p.objects[0])
def _plot(self, *events):
self._layout.loading = True
| hvPlotExplorer issues with Fields pane
Some issues I have noticed when working with the Fields pane of the hvPlotExplorer.
Code used:
``` python
import hvplot.ui
import hvplot.pandas
import panel as pn
pn.extension()
from bokeh.sampledata import penguins
df = penguins.data
hvplot.ui.hvPlotExplorer(df)
```
### 1. Some kinds only show question marks in HoverTools
Kinds are `area`, `density`, and `violin`

### 2. Changing y to `index` as the first thing raises an exception that is not caught.
https://user-images.githubusercontent.com/19758978/180437152-de647b60-2833-4efa-a524-bae11657595b.mp4
### 3. Kind=`errorbars` fails with `Dimension 2 not found`

### 4. Kind=`heatmap` collapses the UI
https://user-images.githubusercontent.com/19758978/180438111-8bcf375e-2941-4ec3-80e4-b0d191e2e576.mp4
| ### Point 1
Not related to the Explorer but a HoloViews problem, as I can get it to work with plain Bokeh.
https://user-images.githubusercontent.com/19758978/181917501-499270b6-c34a-4b19-a6da-f3ea1a653ef0.mp4
``` python
import holoviews as hv
from bokeh.models import HoverTool
hv.extension("bokeh")
hover = HoverTool(tooltips=[("x", "@x"), ("y", "@{y}")])
hv.Area((range(100), range(100))).opts(tools=[hover])
from bokeh.plotting import figure, show
from bokeh.models import HoverTool, ColumnDataSource
source = ColumnDataSource(dict(x=range(100),y=range(100)))
p = figure(width=300, height=300)
p.varea(x='x', y1=0, y2='y', source=source)
p.add_tools(hover)
show(p)
``` | 2022-07-30T18:02:17 |
|
holoviz/hvplot | 892 | holoviz__hvplot-892 | [
"891"
] | f4a0b49f3a11cf9a7414eb17410bf72d23702673 | diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -30,21 +30,36 @@
def explorer(data, **kwargs):
- """Explore your data by building a plot in a Panel UI component.
+ """Explore your data and design your plot via an interactive user interface.
- This function returns a Panel component that has on the right-side
- hand a plot view and on the left-side hand a number of widgets that
- control the plot.
+ This function returns an interactive Panel component that enable you to quickly change the
+ settings of your plot via widgets.
+
+ Reference: https://hvplot.holoviz.org/getting_started/explorer.html
Parameters
----------
data : pandas.DataFrame
Data structure to explore.
+ kwargs : optional
+ Arguments that `data.hvplot()` would also accept like `kind='bar'`.
Returns
-------
hvplotExporer
- Panel component to explore a dataset.
+ Panel component to explore the data and design your plot.
+
+ Example
+ -------
+
+ >>> import hvplot.pandas
+ >>> import pandas as pd
+ >>> df = pd.DataFrame({"x": [1, 2, 3], "y": [1, 4, 9]})
+ >>> hvplot.explorer(df)
+
+ You can also specify initial values
+
+ >>> hvplot.explorer(df, kind='bar', x='x')
"""
return hvPlotExplorer.from_data(data, **kwargs)
| Enable me to set explorer defaults/ initial values
## Request
Enable me to provide defaults/initial values to `hvplot.explorer`.
```python
hvplot.explorer(df, kind='bar', x="name", legend='top')
```
## Motivation
If I have a little bit of insight into my data I might already know sensible defaults/initial values. It's just friction if I have to navigate 3 dropdowns to set the values in the example above when I use the explorer for that type of data over and over again.
## Additional Ideas
It would be very, very powerful if I could provide an existing hvplot to the explorer and have it use its settings as a starting point. I might just want to finish off an existing hvplot by quickly changing the `alpha` or `rot`.
| Ahh. It is already possible to set the initial arguments, but it's just not clear from the docstring.
I will **change my request** to make the docstring clearer.

| 2022-08-30T04:36:45 |
|
holoviz/hvplot | 930 | holoviz__hvplot-930 | [
"816"
] | dfdf4ef4474aed79f5bf0c9e3ae725ca64a5b6c9 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -1622,7 +1622,7 @@ def chart(self, element, x, y, data=None):
for c in y:
kdims, vdims = self._get_dimensions([x], [c])
chart = element(data, kdims, vdims).redim(**{c: self.value_label})
- charts.append((c, chart.relabel(**self._relabel)))
+ charts.append((c, chart.relabel(**self._relabel).redim(**self._redim)))
return (self._by_type(charts, self.group_label, sort=False)
.opts(cur_opts, backend='bokeh')
.opts(compat_opts, backend=self._backend_compat))
| diff --git a/hvplot/tests/testcharts.py b/hvplot/tests/testcharts.py
--- a/hvplot/tests/testcharts.py
+++ b/hvplot/tests/testcharts.py
@@ -3,7 +3,7 @@
from unittest import SkipTest, expectedFailure
from parameterized import parameterized
-from holoviews import NdOverlay, Store, dim
+from holoviews import NdOverlay, Store, dim, render
from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap
from holoviews.element.comparison import ComparisonTestCase
@@ -75,6 +75,23 @@ def test_heatmap_2d_derived_x_and_y(self):
assert plot.vdims == ['temp']
+ def test_xarray_dataset_with_attrs(self):
+ try:
+ import xarray as xr
+ import hvplot.xarray # noqa
+ except ImportError:
+ raise SkipTest('xarray not available')
+
+
+ dset = xr.Dataset(
+ {"u": ("t", [1, 3]), "v": ("t", [4, 2])},
+ coords={"t": ("t", [0, 1], {"long_name": "time", "units": "s"})},
+ )
+ ndoverlay = dset.hvplot.line()
+
+ assert render(ndoverlay, "bokeh").xaxis.axis_label == "time (s)"
+
+
class TestChart2DDask(TestChart2D):
def setUp(self):
| Coordinate label of a multi-element chart from an xarray Dataset lacks long name and units
<details>
<summary>Relevant software versions are collapsed here.</summary>
```
# Name Version Build Channel
bokeh 2.4.3 py39haa95532_0
holoviews 1.15.0 py39haa95532_0
hvplot 0.8.0 py39haa95532_0
python 3.9.12 h6244533_0
xarray 2022.6.0 pyhd8ed1ab_1 conda-forge
```
</details>
Given an `xarray.Dataset` that contains multiple data variables and a coordinate with "long_name" and/or "units" attributes, a multi-element plot created from that dataset labels the coordinate with only the coordinate's name instead of its long name and units:
``` python
In [1]: import holoviews, hvplot.xarray, xarray
In [2]: dset = xarray.Dataset(
...: {"u": ("t", [1, 3]), "v": ("t", [4, 2])},
...: coords={"t": ("t", [0, 1], {"long_name": "time", "units": "s"})},
...: )
In [3]: ndoverlay = dset.hvplot.line()
In [4]: ndoverlay.ddims[0]
Out[4]: Dimension('t')
In [5]: holoviews.render(ndoverlay).xaxis.axis_label
Out[5]: 't'
```
The same is true for an `NdLayout` returned by `dset.hvplot.line(subplots=True)`.
In contrast, a single-element plot created from one data variable has coordinates labeled with the long name and units:
``` python
In [6]: curve = dset["u"].hvplot.line()
In [7]: curve.kdims[0]
Out[7]: Dimension('t', label='time', unit='s')
In [8]: holoviews.render(curve).xaxis.axis_label
Out[8]: 'time (s)'
```
Similarly, a single-element bar plot `dset.hvplot.bar()` of both variables in the dataset honors the long name and units:
``` python
In [9]: dset.hvplot.bar().kdims[0]
Out[9]: Dimension('t', label='time', unit='s')
```
A possible cause is that, whereas the `HoloViewsConverter.single_chart` and other plot methods apply the long name and units stored in `self._redim` by calling the `redim(**self._redim)` method on the chart [[code](https://github.com/holoviz/hvplot/blob/04f6dae12c449987abf69601be47c1e292e1ab3a/hvplot/converter.py#L1504)], the `HoloViewsConverter.chart` method lacks such a call for the multi-element case [[code](https://github.com/holoviz/hvplot/blob/04f6dae12c449987abf69601be47c1e292e1ab3a/hvplot/converter.py#L1604-L1610)]. Could the call be included in the `HoloViewsConverter.chart` method?
To work around the difference, the user may re-dimension the returned overlay, although converting the attributes from the dataset is a bit verbose:
``` python
In [10]: labeled_ndoverlay = ndoverlay.redim(**{"t": {
...: hname: dset["t"].attrs[xname]
...: for hname, xname in [("label", "long_name"), ("unit", "units")]
...: }})
In [11]: labeled_ndoverlay.ddims[0]
Out[11]: Dimension('t', label='time', unit='s')
```
| 2022-10-11T16:18:28 |
|
holoviz/hvplot | 966 | holoviz__hvplot-966 | [
"965"
] | e98dfb18856d1125dd1d96b6f51026e75e54c9be | diff --git a/hvplot/ui.py b/hvplot/ui.py
--- a/hvplot/ui.py
+++ b/hvplot/ui.py
@@ -178,13 +178,13 @@ def __init__(self, data, **params):
@param.depends('explorer.xlim', 'explorer.ylim', watch=True)
def _update_ranges(self):
xlim = self.explorer.xlim()
- if xlim is not None and is_number(xlim[0]) and is_number(xlim[1]):
+ if xlim is not None and is_number(xlim[0]) and is_number(xlim[1]) and xlim[0] != xlim[1]:
self.param.xlim.precedence = 0
self.param.xlim.bounds = xlim
else:
self.param.xlim.precedence = -1
ylim = self.explorer.ylim()
- if ylim is not None and is_number(ylim[0]) and is_number(ylim[1]):
+ if ylim is not None and is_number(ylim[0]) and is_number(ylim[1]) and ylim[0] != ylim[1]:
self.param.ylim.precedence = 0
self.param.ylim.bounds = ylim
else:
| Explorer raises error when all values are the same
The sliders used for xlim/ylim fail if all the x or y values are the same. I would rather see the sliders disabled in this case and everything else work as "normal".
``` python
import numpy as np
import pandas as pd

import hvplot.pandas  # noqa: registers the hvplot API used below
data = dict(
x=np.ones(10),
y=np.ones(10) + 10,
)
df = pd.DataFrame(data)
hvplot.explorer(df)
```
``` python-traceback
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
File ~/miniconda3/envs/holoviz/lib/python3.10/site-packages/IPython/core/formatters.py:972, in MimeBundleFormatter.__call__(self, obj, include, exclude)
969 method = get_real_method(obj, self.print_method)
971 if method is not None:
--> 972 return method(include=include, exclude=exclude)
973 return None
974 else:
File ~/Development/holoviz/repos/panel/panel/viewable.py:931, in Viewer._repr_mimebundle_(self, include, exclude)
930 def _repr_mimebundle_(self, include=None, exclude=None):
--> 931 return self._create_view()._repr_mimebundle_(include, exclude)
File ~/Development/holoviz/repos/panel/panel/viewable.py:674, in Viewable._repr_mimebundle_(self, include, exclude)
672 doc = Document()
673 comm = state._comm_manager.get_server_comm()
--> 674 model = self._render_model(doc, comm)
675 ref = model.ref['id']
676 manager = CommManager(comm_id=comm.id, plot_id=ref)
File ~/Development/holoviz/repos/panel/panel/viewable.py:508, in Renderable._render_model(self, doc, comm)
506 if comm is None:
507 comm = state._comm_manager.get_server_comm()
--> 508 model = self.get_root(doc, comm)
510 if config.embed:
511 embed_state(self, model, doc,
512 json=config.embed_json,
513 json_prefix=config.embed_json_prefix,
514 save_path=config.embed_save_path,
515 load_path=config.embed_load_path,
516 progress=False)
File ~/Development/holoviz/repos/panel/panel/viewable.py:559, in Renderable.get_root(self, doc, comm, preprocess)
542 """
543 Returns the root model and applies pre-processing hooks
544
(...)
556 Returns the bokeh model corresponding to this panel object
557 """
558 doc = init_doc(doc)
--> 559 root = self._get_model(doc, comm=comm)
560 if preprocess:
561 self._preprocess(root)
File ~/Development/holoviz/repos/panel/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/Development/holoviz/repos/panel/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/Development/holoviz/repos/panel/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/Development/holoviz/repos/panel/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/Development/holoviz/repos/panel/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/Development/holoviz/repos/panel/panel/layout/tabs.py:204, in Tabs._get_objects(self, model, old_objects, doc, root, comm)
202 else:
203 try:
--> 204 rendered[pref] = child = pane._get_model(doc, root, model, comm)
205 except RerenderError:
206 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/Development/holoviz/repos/panel/panel/param.py:683, in Param._get_model(self, doc, root, parent, comm)
679 def _get_model(
680 self, doc: Document, root: Optional[Model] = None,
681 parent: Optional[Model] = None, comm: Optional[Comm] = None
682 ) -> Model:
--> 683 model = self.layout._get_model(doc, root, parent, comm)
684 self._models[root.ref['id']] = (model, parent)
685 return model
File ~/Development/holoviz/repos/panel/panel/layout/base.py:146, in Panel._get_model(self, doc, root, parent, comm)
144 if root is None:
145 root = model
--> 146 objects = self._get_objects(model, [], doc, root, comm)
147 props = dict(self._init_params(), objects=objects)
148 model.update(**self._process_param_change(props))
File ~/Development/holoviz/repos/panel/panel/layout/base.py:131, in Panel._get_objects(self, model, old_objects, doc, root, comm)
129 else:
130 try:
--> 131 child = pane._get_model(doc, root, model, comm)
132 except RerenderError:
133 return self._get_objects(model, current_objects[:i], doc, root, comm)
File ~/Development/holoviz/repos/panel/panel/widgets/base.py:98, in Widget._get_model(self, doc, root, parent, comm)
94 def _get_model(
95 self, doc: Document, root: Optional[Model] = None,
96 parent: Optional[Model] = None, comm: Optional[Comm] = None
97 ) -> Model:
---> 98 model = self._widget_type(**self._process_param_change(self._init_params()))
99 if root is None:
100 root = model
File ~/miniconda3/envs/holoviz/lib/python3.10/site-packages/bokeh/models/widgets/sliders.py:75, in AbstractSlider.__init__(self, **kwargs)
73 if 'start' in kwargs and 'end' in kwargs:
74 if kwargs['start'] == kwargs['end']:
---> 75 raise ValueError("Slider 'start' and 'end' cannot be equal.")
77 if "value" in kwargs and "value_throttled" not in kwargs:
78 kwargs["value_throttled"] = kwargs["value"]
ValueError: Slider 'start' and 'end' cannot be equal.
```
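The constraint that ultimately fires is Bokeh's, as the last frame of the traceback shows: a slider model cannot be built with equal `start` and `end`. The patch above guards against this by treating equal bounds like the non-numeric case (`precedence = -1`). A minimal sketch of the underlying Bokeh behaviour:

``` python
from bokeh.models import Slider

# Equal bounds are rejected outright by Bokeh, which is the error surfaced
# through Panel's slider widgets in the traceback above.
Slider(start=1, end=1, value=1, step=0.1)  # ValueError: 'start' and 'end' cannot be equal
```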
| 2022-11-15T18:22:37 |
||
holoviz/hvplot | 990 | holoviz__hvplot-990 | [
"988"
] | 877cdf0f206ae3fcb3a7ed30eea4a1935c92b9f8 | diff --git a/hvplot/converter.py b/hvplot/converter.py
--- a/hvplot/converter.py
+++ b/hvplot/converter.py
@@ -851,7 +851,7 @@ def _process_data(self, kind, data, x, y, by, groupby, row, col,
'e.g. a NumPy array or xarray Dataset, '
'found %s type' % (kind, type(self.data).__name__))
- if hasattr(data, 'columns') and data.columns.name and not group_label:
+ if hasattr(data, 'columns') and hasattr(data.columns, 'name') and data.columns.name and not group_label:
group_label = data.columns.name
elif not group_label:
group_label = 'Variable'
diff --git a/hvplot/ibis.py b/hvplot/ibis.py
--- a/hvplot/ibis.py
+++ b/hvplot/ibis.py
@@ -4,8 +4,8 @@ def patch(name='hvplot', extension='bokeh', logo=False):
try:
import ibis
except:
- raise ImportError('Could not patch plotting API onto dask. '
- 'Dask could not be imported.')
+ raise ImportError('Could not patch plotting API onto ibis. '
+ 'Ibis could not be imported.')
_patch_plot = lambda self: hvPlotTabular(self)
_patch_plot.__doc__ = hvPlotTabular.__call__.__doc__
patch_property = property(_patch_plot)
| diff --git a/hvplot/tests/testibis.py b/hvplot/tests/testibis.py
new file mode 100644
--- /dev/null
+++ b/hvplot/tests/testibis.py
@@ -0,0 +1,22 @@
+"""Ibis works with hvplot"""
+import pytest
+
+try:
+ import ibis
+ import hvplot.ibis # noqa
+ import pandas as pd
+except:
+ pytest.skip(allow_module_level=True)
+
+
[email protected]
+def table():
+ df = pd.DataFrame({
+ "x": [pd.Timestamp("2022-01-01"), pd.Timestamp("2022-01-02")], "y": [1,2]
+ })
+ con = ibis.pandas.connect({"df": df})
+ return con.table("df")
+
+def test_can_hvplot(table):
+ """hvplot works with Ibis"""
+ table.hvplot(x="x", y="y")
| Ibis backend not working: 'list' object has no attribute 'name'
hvplot==0.8.2, duckdb=0.6.0, ibis-framework[duckdb]==3.2.0
I would like to try combining duckdb and hvplot. It seems this should be possible via `ibis`, but when I try I get the error below.
```python
from pathlib import Path
import duckdb
import holoviews as hv
import ibis
import panel as pn
import wget
import hvplot.ibis
pn.extension(sizing_mode="stretch_width")
FILE = 'yellow_tripdata_2022-01.parquet'
PATH = Path(FILE)
URL = f"https://d37ci6vzurychx.cloudfront.net/trip-data/{FILE}"
DB = 'trip-data.duckdb'
TABLE = 'trip_data'
if not PATH.exists():
wget.download(URL, FILE)
con = duckdb.connect(database=DB, read_only=False)
QUERY = f"CREATE TABLE {TABLE} AS SELECT * FROM read_parquet('{FILE}');"
con.execute(QUERY)
con = ibis.duckdb.connect(database=DB, read_only=False)
trip_data = con.table(TABLE)
trip_data = trip_data.head(1000).sort_by("tpep_pickup_datetime")
plot = trip_data.hvplot(x="tpep_pickup_datetime", y="trip_distance")
```
```bash
$ python script.py
C:\repos\private\hvplot\.venv\lib\site-packages\ibis\backends\postgres\registry.py:164: UserWarning: locale specific date formats (%c, %x, %X) are not yet implemented for Windows
warnings.warn(
C:\repos\private\hvplot\.venv\lib\site-packages\duckdb_engine\__init__.py:229: DuckDBEngineWarning: duckdb-engine doesn't yet support reflection on indices
warnings.warn(
Traceback (most recent call last):
File "C:\repos\private\hvplot\script.py", line 31, in <module>
plot = trip_data.hvplot(x="tpep_pickup_datetime", y="trip_distance")
File "C:\repos\private\hvplot\hvplot\plotting\core.py", line 92, in __call__
return self._get_converter(x, y, kind, **kwds)(kind, x, y)
File "C:\repos\private\hvplot\hvplot\plotting\core.py", line 99, in _get_converter
return HoloViewsConverter(self._data, x, y, kind=kind, **params)
File "C:\repos\private\hvplot\hvplot\converter.py", line 388, in __init__
self._process_data(
File "C:\repos\private\hvplot\hvplot\converter.py", line 854, in _process_data
if hasattr(data, 'columns') and data.columns.name and not group_label:
AttributeError: 'list' object has no attribute 'name'
```
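The failing check assumes a pandas-style `columns` index; for an Ibis table expression, `.columns` is a plain Python list (hence `'list' object has no attribute 'name'`), which is what the `hasattr(data.columns, 'name')` guard in the patch above accounts for. A small sketch of the difference, using the in-memory pandas backend rather than DuckDB:

```python
import ibis
import pandas as pd

df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
table = ibis.pandas.connect({"df": df}).table("df")

print(type(df.columns))     # pandas Index, which has a .name attribute
print(type(table.columns))  # plain list, which does not
```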
| 2022-11-26T08:33:56 |